From 3fb2b68d5bf5ee66cc7cb80f8c19938c0da406a4 Mon Sep 17 00:00:00 2001 From: Felix Hennig Date: Tue, 11 Jan 2022 11:50:04 +0100 Subject: [PATCH 01/14] WIP --- Cargo.lock | 1 + rust/crd/Cargo.toml | 1 + rust/crd/src/lib.rs | 166 +++++++++++++++++++++++++- rust/operator-binary/src/discovery.rs | 166 ++++++++++++++++++++++++++ rust/operator-binary/src/main.rs | 1 + 5 files changed, 331 insertions(+), 4 deletions(-) create mode 100644 rust/operator-binary/src/discovery.rs diff --git a/Cargo.lock b/Cargo.lock index eee1fb70..cb5ae604 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1592,6 +1592,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", + "snafu", "stackable-operator", "strum", "strum_macros", diff --git a/rust/crd/Cargo.toml b/rust/crd/Cargo.toml index 76a6cd3c..411f7170 100644 --- a/rust/crd/Cargo.toml +++ b/rust/crd/Cargo.toml @@ -13,6 +13,7 @@ stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", semver = "1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" +snafu = "0.6" strum = "0.23" strum_macros = "0.23" thiserror = "1.0" diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 28961406..3307ec56 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -1,8 +1,12 @@ use serde::{Deserialize, Serialize}; -use stackable_operator::kube::CustomResource; -use stackable_operator::product_config_utils::{ConfigError, Configuration}; -use stackable_operator::role_utils::Role; -use stackable_operator::schemars::{self, JsonSchema}; +use snafu::{OptionExt, Snafu}; +use stackable_operator::{ + kube::{runtime::reflector::ObjectRef, CustomResource}, + product_config_utils::{ConfigError, Configuration}, + role_utils::{Role, RoleGroupRef}, + schemars::{self, JsonSchema}, +}; +use std::borrow::Borrow; use std::collections::BTreeMap; use std::str::FromStr; use strum_macros::Display; @@ -144,7 +148,30 @@ impl DruidRole { } } +#[derive(Debug, Snafu)] +#[snafu(display("object has no namespace associated"))] +pub struct NoNamespaceError; + +/// Reference to a single `Pod` that is a component of a [`DruidCluster`] +/// +/// Used for service discovery. +pub struct DruidPodRef { + pub namespace: String, + pub role_group_service_name: String, + pub pod_name: String, +} + +impl DruidPodRef { + pub fn fqdn(&self) -> String { + format!( + "{}.{}.{}.svc.cluster.local", + self.pod_name, self.role_group_service_name, self.namespace + ) + } +} + impl DruidCluster { + /// The spec for the given Role pub fn get_role(&self, role: &DruidRole) -> &Role { match role { DruidRole::Coordinator => &self.spec.coordinators, @@ -154,6 +181,71 @@ impl DruidCluster { DruidRole::Router => &self.spec.routers, } } + + /// The name of the role-level load-balanced Kubernetes `Service` + pub fn role_service_name(&self, role: &DruidRole) -> Option { + Some(format!( + "{}-{}", + self.metadata.name.clone()?, + role.to_string() + )) + } + + /// The fully-qualified domain name of the role-level load-balanced Kubernetes `Service` + pub fn role_service_fqdn(&self, role: &DruidRole) -> Option { + Some(format!( + "{}.{}.svc.cluster.local", + self.role_service_name(role)?, + self.metadata.namespace.as_ref()? 
+ )) + } + + /// Metadata about a rolegroup + pub fn rolegroup_ref( + &self, + role: &DruidRole, + group_name: impl Into, + ) -> RoleGroupRef { + RoleGroupRef { + cluster: ObjectRef::from_obj(self), + role: role.to_string(), + role_group: group_name.into(), + } + } + + /// List all pods expected to form the cluster + /// + /// We try to predict the pods here rather than looking at the current cluster state in order to + /// avoid instance churn. For example, regenerating zoo.cfg based on the cluster state would lead to + /// a lot of spurious restarts, as well as opening us up to dangerous split-brain conditions because + /// the pods have inconsistent snapshots of which servers they should expect to be in quorum. + pub fn pods(&self, role: &DruidRole) -> Result, NoNamespaceError> { + let ns = self + .metadata + .namespace + .clone() + .context(NoNamespaceContext)?; + // Order rolegroups consistently, to avoid spurious downstream rewrites + let sorted_rolegroups = self + .get_role(role) + .role_groups + .borrow() + .into_iter() + .collect::>(); + let pods = sorted_rolegroups + .into_iter() + .flat_map(move |(rolegroup_name, rolegroup)| { + let rolegroup_ref = self.rolegroup_ref(role, rolegroup_name); + let ns = ns.clone(); + (0..rolegroup.replicas.unwrap_or(0)).map(move |i| DruidPodRef { + namespace: ns.clone(), + role_group_service_name: rolegroup_ref.object_name(), + pod_name: format!("{}-{}", rolegroup_ref.object_name(), i), + }) + }) + .collect(); + Ok(pods) + } } #[derive(Clone, Debug, Default, Deserialize, JsonSchema, Serialize)] @@ -361,3 +453,69 @@ fn build_string_list(strings: &[String]) -> String { let comma_list = quoted_strings.join(", "); format!("[{}]", comma_list) } + +#[cfg(test)] +mod tests { + use super::*; + use stackable_operator::role_utils::CommonConfiguration; + + #[test] + fn test1() { + let cluster = DruidCluster::new( + "testcluster", + DruidClusterSpec { + stopped: None, + version: "".to_string(), + brokers: Role { + config: CommonConfiguration { + config: DruidConfig, + config_overrides: Default::default(), + env_overrides: Default::default(), + cli_overrides: Default::default(), + }, + role_groups: Default::default(), + }, + coordinators: Role { + config: CommonConfiguration { + config: DruidConfig, + config_overrides: Default::default(), + env_overrides: Default::default(), + cli_overrides: Default::default(), + }, + role_groups: Default::default(), + }, + historicals: Role { + config: CommonConfiguration { + config: DruidConfig, + config_overrides: Default::default(), + env_overrides: Default::default(), + cli_overrides: Default::default(), + }, + role_groups: Default::default(), + }, + middle_managers: Role { + config: CommonConfiguration { + config: DruidConfig, + config_overrides: Default::default(), + env_overrides: Default::default(), + cli_overrides: Default::default(), + }, + role_groups: Default::default(), + }, + routers: Role { + config: CommonConfiguration { + config: DruidConfig, + config_overrides: Default::default(), + env_overrides: Default::default(), + cli_overrides: Default::default(), + }, + role_groups: Default::default(), + }, + metadata_storage_database: Default::default(), + deep_storage: Default::default(), + s3: None, + zookeeper_reference: Default::default(), + }, + ); + } +} diff --git a/rust/operator-binary/src/discovery.rs b/rust/operator-binary/src/discovery.rs new file mode 100644 index 00000000..079aade8 --- /dev/null +++ b/rust/operator-binary/src/discovery.rs @@ -0,0 +1,166 @@ +//! Discovery for Druid. 
We make Druid discoverable by putting a connection string to the router process +//! inside a config map. We only provide a connection string to the router process, since it serves as +//! a gateway to the cluster for client queries. + +use std::{collections::BTreeSet, num::TryFromIntError}; + +use snafu::{OptionExt, ResultExt, Snafu}; +use stackable_druid_crd::{DruidCluster, DruidRole}; +use stackable_operator::{ + builder::{ConfigMapBuilder, ObjectMetaBuilder}, + k8s_openapi::api::core::v1::{ConfigMap, Endpoints, Service}, + kube::{runtime::reflector::ObjectRef, Resource, ResourceExt}, +}; + +use crate::{druid_controller::druid_version, APP_NAME, APP_PORT}; + +#[derive(Snafu, Debug)] +pub enum Error { + #[snafu(display("object {} is missing metadata to build owner reference", zk))] + ObjectMissingMetadataForOwnerRef { + source: stackable_operator::error::Error, + zk: ObjectRef, + }, + #[snafu(display("chroot path {} was relative (must be absolute)", chroot))] + RelativeChroot { chroot: String }, + #[snafu(display("object has no name associated"))] + NoName, + #[snafu(display("object has no namespace associated"))] + NoNamespace, + #[snafu(display("failed to list expected pods"))] + ExpectedPods { + source: stackable_zookeeper_crd::NoNamespaceError, + }, + #[snafu(display("could not find service port with name {}", port_name))] + NoServicePort { port_name: String }, + #[snafu(display("service port with name {} does not have a nodePort", port_name))] + NoNodePort { port_name: String }, + #[snafu(display("could not find Endpoints for {}", svc))] + FindEndpoints { + source: stackable_operator::error::Error, + svc: ObjectRef, + }, + #[snafu(display("nodePort was out of range"))] + InvalidNodePort { source: TryFromIntError }, + #[snafu(display("failed to build ConfigMap"))] + BuildConfigMap { + source: stackable_operator::error::Error, + }, +} + +/// Builds discovery [`ConfigMap`]s for connecting to a [`DruidCluster`] for all expected scenarios +pub async fn build_discovery_configmaps( + client: &stackable_operator::client::Client, + owner: &impl Resource, + druid: &DruidCluster, + svc: &Service, + chroot: Option<&str>, +) -> Result, Error> { + let name = owner.name(); + Ok(vec![ + build_discovery_configmap(&name, owner, druid, chroot, pod_hosts(zk)?)?, + build_discovery_configmap( + &format!("{}-nodeport", name), + owner, + zk, + chroot, + nodeport_hosts(client, svc, "zk").await?, + )?, + ]) +} + +/// Build a discovery [`ConfigMap`] containing information about how to connect to a certain [`ZookeeperCluster`] +/// +/// `hosts` will usually come from either [`pod_hosts`] or [`nodeport_hosts`]. 
+fn build_discovery_configmap( + name: &str, + owner: &impl Resource, + zk: &ZookeeperCluster, + chroot: Option<&str>, + hosts: impl IntoIterator, u16)>, +) -> Result { + // Write a connection string of the format that Java ZooKeeper client expects: + // "{host1}:{port1},{host2:port2},.../{chroot}" + // See https://zookeeper.apache.org/doc/current/apidocs/zookeeper-server/org/apache/zookeeper/ZooKeeper.html#ZooKeeper-java.lang.String-int-org.apache.zookeeper.Watcher- + let mut conn_str = hosts + .into_iter() + .map(|(host, port)| format!("{}:{}", host.into(), port)) + .collect::>() + .join(","); + if let Some(chroot) = chroot { + if !chroot.starts_with('/') { + return RelativeChroot { chroot }.fail(); + } + conn_str.push_str(chroot); + } + ConfigMapBuilder::new() + .metadata( + ObjectMetaBuilder::new() + .name_and_namespace(zk) + .name(name) + .ownerreference_from_resource(owner, None, Some(true)) + .with_context(|| ObjectMissingMetadataForOwnerRef { + zk: ObjectRef::from_obj(zk), + })? + .with_recommended_labels( + zk, + APP_NAME, + zk_version(zk).unwrap_or("unknown"), + &ZookeeperRole::Server.to_string(), + "discovery", + ) + .build(), + ) + .add_data("ZOOKEEPER", conn_str) + .build() + .context(BuildConfigMap) +} + +/// Lists all Pods FQDNs expected to host the [`ZookeeperCluster`] +fn pod_hosts(zk: &ZookeeperCluster) -> Result + '_, Error> { + Ok(zk + .pods() + .context(ExpectedPods)? + .into_iter() + .map(|pod_ref| (pod_ref.fqdn(), APP_PORT))) +} + +/// Lists all nodes currently hosting Pods participating in the [`Service`] +async fn nodeport_hosts( + client: &stackable_operator::client::Client, + svc: &Service, + port_name: &str, +) -> Result, Error> { + let svc_port = svc + .spec + .as_ref() + .and_then(|svc_spec| { + svc_spec + .ports + .as_ref()? + .iter() + .find(|port| port.name.as_deref() == Some(port_name)) + }) + .context(NoServicePort { port_name })?; + let node_port = svc_port.node_port.context(NoNodePort { port_name })?; + let endpoints = client + .get::( + svc.metadata.name.as_deref().context(NoName)?, + svc.metadata.namespace.as_deref(), + ) + .await + .with_context(|| FindEndpoints { + svc: ObjectRef::from_obj(svc), + })?; + let nodes = endpoints + .subsets + .into_iter() + .flatten() + .flat_map(|subset| subset.addresses) + .flatten() + .flat_map(|addr| addr.node_name); + let addrs = nodes + .map(|node| Ok((node, node_port.try_into().context(InvalidNodePort)?))) + .collect::, _>>()?; + Ok(addrs) +} diff --git a/rust/operator-binary/src/main.rs b/rust/operator-binary/src/main.rs index 2ad5f320..d1320a72 100644 --- a/rust/operator-binary/src/main.rs +++ b/rust/operator-binary/src/main.rs @@ -1,4 +1,5 @@ mod config; +// mod discovery; mod druid_controller; use futures::StreamExt; From 68312c079ff06a7ffd977513cbbe7817bf7fd913 Mon Sep 17 00:00:00 2001 From: Felix Hennig Date: Tue, 11 Jan 2022 15:09:34 +0100 Subject: [PATCH 02/14] Added pods() function and test --- rust/crd/src/lib.rs | 56 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 49 insertions(+), 7 deletions(-) diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 3307ec56..70b0b497 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -155,6 +155,7 @@ pub struct NoNamespaceError; /// Reference to a single `Pod` that is a component of a [`DruidCluster`] /// /// Used for service discovery. 
+#[derive(Debug, PartialEq, Eq)] pub struct DruidPodRef { pub namespace: String, pub role_group_service_name: String, @@ -458,17 +459,20 @@ fn build_string_list(strings: &[String]) -> String { mod tests { use super::*; use stackable_operator::role_utils::CommonConfiguration; + use stackable_operator::role_utils::RoleGroup; + use std::array::IntoIter; + use std::collections::HashMap; #[test] fn test1() { - let cluster = DruidCluster::new( + let mut cluster = DruidCluster::new( "testcluster", DruidClusterSpec { stopped: None, version: "".to_string(), brokers: Role { config: CommonConfiguration { - config: DruidConfig, + config: DruidConfig {}, config_overrides: Default::default(), env_overrides: Default::default(), cli_overrides: Default::default(), @@ -477,7 +481,7 @@ mod tests { }, coordinators: Role { config: CommonConfiguration { - config: DruidConfig, + config: DruidConfig {}, config_overrides: Default::default(), env_overrides: Default::default(), cli_overrides: Default::default(), @@ -486,7 +490,7 @@ mod tests { }, historicals: Role { config: CommonConfiguration { - config: DruidConfig, + config: DruidConfig {}, config_overrides: Default::default(), env_overrides: Default::default(), cli_overrides: Default::default(), @@ -495,7 +499,7 @@ mod tests { }, middle_managers: Role { config: CommonConfiguration { - config: DruidConfig, + config: DruidConfig {}, config_overrides: Default::default(), env_overrides: Default::default(), cli_overrides: Default::default(), @@ -504,12 +508,24 @@ mod tests { }, routers: Role { config: CommonConfiguration { - config: DruidConfig, + config: DruidConfig {}, config_overrides: Default::default(), env_overrides: Default::default(), cli_overrides: Default::default(), }, - role_groups: Default::default(), + role_groups: HashMap::<_, _>::from_iter(IntoIter::new([( + "default".to_string(), + RoleGroup { + config: CommonConfiguration { + config: DruidConfig {}, + config_overrides: Default::default(), + env_overrides: Default::default(), + cli_overrides: Default::default(), + }, + replicas: Some(1), + selector: None, + }, + )])), }, metadata_storage_database: Default::default(), deep_storage: Default::default(), @@ -517,5 +533,31 @@ mod tests { zookeeper_reference: Default::default(), }, ); + + cluster.metadata.namespace = Some("default".to_string()); + + assert_eq!(cluster.metadata.name, Some("testcluster".to_string())); + + assert_eq!( + cluster.role_service_name(&DruidRole::Router), + Some("testcluster-router".to_string()) + ); + + match cluster.pods(&DruidRole::Router) { + Ok(pods) => assert_eq!( + pods, + vec![DruidPodRef { + namespace: "default".to_string(), + role_group_service_name: "testcluster-router-default".to_string(), + pod_name: "testcluster-router-default-0".to_string() + }] + ), + Err(e) => { + panic!( + "There as an error fetching the pod names for the cluster: {}", + e + ); + } + } } } From 301541d2231751dc50e85dc94d763f68b7d86acb Mon Sep 17 00:00:00 2001 From: Felix Hennig Date: Tue, 11 Jan 2022 16:36:40 +0100 Subject: [PATCH 03/14] First discovery CM gets written now --- examples/derby/druidcluster.yaml | 2 +- examples/psql-s3/druidcluster.yaml | 2 +- examples/psql/druidcluster.yaml | 2 +- rust/operator-binary/src/discovery.rs | 115 ++++++------------- rust/operator-binary/src/druid_controller.rs | 22 +++- rust/operator-binary/src/main.rs | 2 +- 6 files changed, 60 insertions(+), 85 deletions(-) diff --git a/examples/derby/druidcluster.yaml b/examples/derby/druidcluster.yaml index c7cb6a90..7758d010 100644 --- 
a/examples/derby/druidcluster.yaml +++ b/examples/derby/druidcluster.yaml @@ -1,7 +1,7 @@ apiVersion: druid.stackable.tech/v1alpha1 kind: DruidCluster metadata: - name: simple + name: derby-druid spec: version: 0.22.0 zookeeperReference: diff --git a/examples/psql-s3/druidcluster.yaml b/examples/psql-s3/druidcluster.yaml index d915cbec..c71f9f95 100644 --- a/examples/psql-s3/druidcluster.yaml +++ b/examples/psql-s3/druidcluster.yaml @@ -1,7 +1,7 @@ apiVersion: druid.stackable.tech/v1alpha1 kind: DruidCluster metadata: - name: simple + name: psqls3-druid spec: version: 0.22.0 zookeeperReference: diff --git a/examples/psql/druidcluster.yaml b/examples/psql/druidcluster.yaml index 23707fc2..c7c4d232 100644 --- a/examples/psql/druidcluster.yaml +++ b/examples/psql/druidcluster.yaml @@ -1,7 +1,7 @@ apiVersion: druid.stackable.tech/v1alpha1 kind: DruidCluster metadata: - name: simple + name: psql-druid spec: version: 0.22.0 zookeeperReference: diff --git a/rust/operator-binary/src/discovery.rs b/rust/operator-binary/src/discovery.rs index 079aade8..0aacf9cd 100644 --- a/rust/operator-binary/src/discovery.rs +++ b/rust/operator-binary/src/discovery.rs @@ -5,21 +5,21 @@ use std::{collections::BTreeSet, num::TryFromIntError}; use snafu::{OptionExt, ResultExt, Snafu}; -use stackable_druid_crd::{DruidCluster, DruidRole}; +use stackable_druid_crd::{DruidCluster, DruidRole, APP_NAME}; use stackable_operator::{ builder::{ConfigMapBuilder, ObjectMetaBuilder}, k8s_openapi::api::core::v1::{ConfigMap, Endpoints, Service}, kube::{runtime::reflector::ObjectRef, Resource, ResourceExt}, }; -use crate::{druid_controller::druid_version, APP_NAME, APP_PORT}; +use crate::druid_controller::druid_version; #[derive(Snafu, Debug)] pub enum Error { - #[snafu(display("object {} is missing metadata to build owner reference", zk))] + #[snafu(display("object {} is missing metadata to build owner reference", druid))] ObjectMissingMetadataForOwnerRef { source: stackable_operator::error::Error, - zk: ObjectRef, + druid: ObjectRef, }, #[snafu(display("chroot path {} was relative (must be absolute)", chroot))] RelativeChroot { chroot: String }, @@ -29,8 +29,10 @@ pub enum Error { NoNamespace, #[snafu(display("failed to list expected pods"))] ExpectedPods { - source: stackable_zookeeper_crd::NoNamespaceError, + source: stackable_druid_crd::NoNamespaceError, }, + #[snafu(display("failed to get service FQDN"))] + NoServiceFqdn, #[snafu(display("could not find service port with name {}", port_name))] NoServicePort { port_name: String }, #[snafu(display("service port with name {} does not have a nodePort", port_name))] @@ -53,114 +55,67 @@ pub async fn build_discovery_configmaps( client: &stackable_operator::client::Client, owner: &impl Resource, druid: &DruidCluster, - svc: &Service, - chroot: Option<&str>, ) -> Result, Error> { let name = owner.name(); - Ok(vec![ - build_discovery_configmap(&name, owner, druid, chroot, pod_hosts(zk)?)?, - build_discovery_configmap( - &format!("{}-nodeport", name), - owner, - zk, - chroot, - nodeport_hosts(client, svc, "zk").await?, - )?, - ]) + Ok(vec![build_discovery_configmap(&name, owner, druid)?]) } -/// Build a discovery [`ConfigMap`] containing information about how to connect to a certain [`ZookeeperCluster`] +/// Build a discovery [`ConfigMap`] containing information about how to connect to a certain [`DruidCluster`] /// /// `hosts` will usually come from either [`pod_hosts`] or [`nodeport_hosts`]. 
fn build_discovery_configmap( name: &str, owner: &impl Resource, - zk: &ZookeeperCluster, - chroot: Option<&str>, - hosts: impl IntoIterator, u16)>, + druid: &DruidCluster, ) -> Result { - // Write a connection string of the format that Java ZooKeeper client expects: - // "{host1}:{port1},{host2:port2},.../{chroot}" - // See https://zookeeper.apache.org/doc/current/apidocs/zookeeper-server/org/apache/zookeeper/ZooKeeper.html#ZooKeeper-java.lang.String-int-org.apache.zookeeper.Watcher- + // TODO: Question: What does the connection string look like for druid? Online it says to connect + // to the Broker (or probably router) host. But we could maybe have multiple of those, + // How can we support multiple hosts? Is it fine to just provide the service domain? + // I tried that and it seems to have worked. Testing with multiple routers would be necessary. + let conn_str = format!( + "druid://{}:{}/druid/v2/sql", + druid + .role_service_fqdn(&DruidRole::Router) + .with_context(|| NoServiceFqdn)?, + DruidRole::Router.get_http_port() + ); + /* let mut conn_str = hosts .into_iter() .map(|(host, port)| format!("{}:{}", host.into(), port)) .collect::>() .join(","); - if let Some(chroot) = chroot { - if !chroot.starts_with('/') { - return RelativeChroot { chroot }.fail(); - } - conn_str.push_str(chroot); - } + */ ConfigMapBuilder::new() .metadata( ObjectMetaBuilder::new() - .name_and_namespace(zk) + .name_and_namespace(druid) .name(name) .ownerreference_from_resource(owner, None, Some(true)) .with_context(|| ObjectMissingMetadataForOwnerRef { - zk: ObjectRef::from_obj(zk), + druid: ObjectRef::from_obj(druid), })? .with_recommended_labels( - zk, + druid, APP_NAME, - zk_version(zk).unwrap_or("unknown"), - &ZookeeperRole::Server.to_string(), + druid_version(druid).unwrap_or("unknown"), + &DruidRole::Router.to_string(), "discovery", ) .build(), ) - .add_data("ZOOKEEPER", conn_str) + .add_data("DRUID", conn_str) .build() .context(BuildConfigMap) } +/* /// Lists all Pods FQDNs expected to host the [`ZookeeperCluster`] -fn pod_hosts(zk: &ZookeeperCluster) -> Result + '_, Error> { - Ok(zk - .pods() +fn pod_hosts(druid: &DruidCluster) -> Result + '_, Error> { + Ok(druid + .pods(&DruidRole::Router) .context(ExpectedPods)? .into_iter() - .map(|pod_ref| (pod_ref.fqdn(), APP_PORT))) -} - -/// Lists all nodes currently hosting Pods participating in the [`Service`] -async fn nodeport_hosts( - client: &stackable_operator::client::Client, - svc: &Service, - port_name: &str, -) -> Result, Error> { - let svc_port = svc - .spec - .as_ref() - .and_then(|svc_spec| { - svc_spec - .ports - .as_ref()? 
- .iter() - .find(|port| port.name.as_deref() == Some(port_name)) - }) - .context(NoServicePort { port_name })?; - let node_port = svc_port.node_port.context(NoNodePort { port_name })?; - let endpoints = client - .get::( - svc.metadata.name.as_deref().context(NoName)?, - svc.metadata.namespace.as_deref(), - ) - .await - .with_context(|| FindEndpoints { - svc: ObjectRef::from_obj(svc), - })?; - let nodes = endpoints - .subsets - .into_iter() - .flatten() - .flat_map(|subset| subset.addresses) - .flatten() - .flat_map(|addr| addr.node_name); - let addrs = nodes - .map(|node| Ok((node, node_port.try_into().context(InvalidNodePort)?))) - .collect::, _>>()?; - Ok(addrs) + .map(|pod_ref| (pod_ref.fqdn(), DruidRole::Router.get_http_port()))) } +*/ diff --git a/rust/operator-binary/src/druid_controller.rs b/rust/operator-binary/src/druid_controller.rs index 0bd97f93..e07563b2 100644 --- a/rust/operator-binary/src/druid_controller.rs +++ b/rust/operator-binary/src/druid_controller.rs @@ -6,7 +6,10 @@ use std::{ time::Duration, }; -use crate::config::{get_jvm_config, get_log4j_config, get_runtime_properties}; +use crate::{ + config::{get_jvm_config, get_log4j_config, get_runtime_properties}, + discovery::{self, build_discovery_configmaps}, +}; use snafu::{OptionExt, ResultExt, Snafu}; use stackable_druid_crd::{ DeepStorageType, DruidCluster, DruidRole, APP_NAME, CONTAINER_HTTP_PORT, @@ -114,6 +117,12 @@ pub enum Error { PropertiesWriteError { source: stackable_operator::product_config::writer::PropertiesWriterError, }, + #[snafu(display("failed to build discovery ConfigMap"))] + BuildDiscoveryConfig { source: discovery::Error }, + #[snafu(display("failed to apply discovery ConfigMap"))] + ApplyDiscoveryConfig { + source: stackable_operator::error::Error, + }, } type Result = std::result::Result; @@ -210,6 +219,17 @@ pub async fn reconcile_druid(druid: DruidCluster, ctx: Context) -> Result Date: Wed, 12 Jan 2022 09:45:55 +0100 Subject: [PATCH 04/14] removed dead code again --- rust/crd/src/lib.rs | 66 +------------------- rust/operator-binary/src/druid_controller.rs | 4 +- 2 files changed, 3 insertions(+), 67 deletions(-) diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 70b0b497..2f7b919b 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -200,53 +200,6 @@ impl DruidCluster { self.metadata.namespace.as_ref()? )) } - - /// Metadata about a rolegroup - pub fn rolegroup_ref( - &self, - role: &DruidRole, - group_name: impl Into, - ) -> RoleGroupRef { - RoleGroupRef { - cluster: ObjectRef::from_obj(self), - role: role.to_string(), - role_group: group_name.into(), - } - } - - /// List all pods expected to form the cluster - /// - /// We try to predict the pods here rather than looking at the current cluster state in order to - /// avoid instance churn. For example, regenerating zoo.cfg based on the cluster state would lead to - /// a lot of spurious restarts, as well as opening us up to dangerous split-brain conditions because - /// the pods have inconsistent snapshots of which servers they should expect to be in quorum. 
- pub fn pods(&self, role: &DruidRole) -> Result, NoNamespaceError> { - let ns = self - .metadata - .namespace - .clone() - .context(NoNamespaceContext)?; - // Order rolegroups consistently, to avoid spurious downstream rewrites - let sorted_rolegroups = self - .get_role(role) - .role_groups - .borrow() - .into_iter() - .collect::>(); - let pods = sorted_rolegroups - .into_iter() - .flat_map(move |(rolegroup_name, rolegroup)| { - let rolegroup_ref = self.rolegroup_ref(role, rolegroup_name); - let ns = ns.clone(); - (0..rolegroup.replicas.unwrap_or(0)).map(move |i| DruidPodRef { - namespace: ns.clone(), - role_group_service_name: rolegroup_ref.object_name(), - pod_name: format!("{}-{}", rolegroup_ref.object_name(), i), - }) - }) - .collect(); - Ok(pods) - } } #[derive(Clone, Debug, Default, Deserialize, JsonSchema, Serialize)] @@ -464,7 +417,7 @@ mod tests { use std::collections::HashMap; #[test] - fn test1() { + fn test_service_name_generation() { let mut cluster = DruidCluster::new( "testcluster", DruidClusterSpec { @@ -542,22 +495,5 @@ mod tests { cluster.role_service_name(&DruidRole::Router), Some("testcluster-router".to_string()) ); - - match cluster.pods(&DruidRole::Router) { - Ok(pods) => assert_eq!( - pods, - vec![DruidPodRef { - namespace: "default".to_string(), - role_group_service_name: "testcluster-router-default".to_string(), - pod_name: "testcluster-router-default-0".to_string() - }] - ), - Err(e) => { - panic!( - "There as an error fetching the pod names for the cluster: {}", - e - ); - } - } } } diff --git a/rust/operator-binary/src/druid_controller.rs b/rust/operator-binary/src/druid_controller.rs index e07563b2..3a782233 100644 --- a/rust/operator-binary/src/druid_controller.rs +++ b/rust/operator-binary/src/druid_controller.rs @@ -255,12 +255,12 @@ pub fn build_role_service(role_name: &str, druid: &DruidCluster) -> Result Date: Wed, 12 Jan 2022 09:54:36 +0100 Subject: [PATCH 05/14] cleanup --- rust/crd/src/lib.rs | 29 ++---------------- rust/operator-binary/src/discovery.rs | 32 ++------------------ rust/operator-binary/src/druid_controller.rs | 4 +-- 3 files changed, 7 insertions(+), 58 deletions(-) diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 2f7b919b..59309d8b 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -1,12 +1,10 @@ use serde::{Deserialize, Serialize}; -use snafu::{OptionExt, Snafu}; use stackable_operator::{ - kube::{runtime::reflector::ObjectRef, CustomResource}, + kube::CustomResource, product_config_utils::{ConfigError, Configuration}, - role_utils::{Role, RoleGroupRef}, + role_utils::Role, schemars::{self, JsonSchema}, }; -use std::borrow::Borrow; use std::collections::BTreeMap; use std::str::FromStr; use strum_macros::Display; @@ -148,29 +146,6 @@ impl DruidRole { } } -#[derive(Debug, Snafu)] -#[snafu(display("object has no namespace associated"))] -pub struct NoNamespaceError; - -/// Reference to a single `Pod` that is a component of a [`DruidCluster`] -/// -/// Used for service discovery. 
-#[derive(Debug, PartialEq, Eq)] -pub struct DruidPodRef { - pub namespace: String, - pub role_group_service_name: String, - pub pod_name: String, -} - -impl DruidPodRef { - pub fn fqdn(&self) -> String { - format!( - "{}.{}.{}.svc.cluster.local", - self.pod_name, self.role_group_service_name, self.namespace - ) - } -} - impl DruidCluster { /// The spec for the given Role pub fn get_role(&self, role: &DruidRole) -> &Role { diff --git a/rust/operator-binary/src/discovery.rs b/rust/operator-binary/src/discovery.rs index 0aacf9cd..8868fc95 100644 --- a/rust/operator-binary/src/discovery.rs +++ b/rust/operator-binary/src/discovery.rs @@ -2,13 +2,13 @@ //! inside a config map. We only provide a connection string to the router process, since it serves as //! a gateway to the cluster for client queries. -use std::{collections::BTreeSet, num::TryFromIntError}; +use std::num::TryFromIntError; use snafu::{OptionExt, ResultExt, Snafu}; use stackable_druid_crd::{DruidCluster, DruidRole, APP_NAME}; use stackable_operator::{ builder::{ConfigMapBuilder, ObjectMetaBuilder}, - k8s_openapi::api::core::v1::{ConfigMap, Endpoints, Service}, + k8s_openapi::api::core::v1::{ConfigMap, Service}, kube::{runtime::reflector::ObjectRef, Resource, ResourceExt}, }; @@ -27,10 +27,6 @@ pub enum Error { NoName, #[snafu(display("object has no namespace associated"))] NoNamespace, - #[snafu(display("failed to list expected pods"))] - ExpectedPods { - source: stackable_druid_crd::NoNamespaceError, - }, #[snafu(display("failed to get service FQDN"))] NoServiceFqdn, #[snafu(display("could not find service port with name {}", port_name))] @@ -52,7 +48,6 @@ pub enum Error { /// Builds discovery [`ConfigMap`]s for connecting to a [`DruidCluster`] for all expected scenarios pub async fn build_discovery_configmaps( - client: &stackable_operator::client::Client, owner: &impl Resource, druid: &DruidCluster, ) -> Result, Error> { @@ -68,10 +63,6 @@ fn build_discovery_configmap( owner: &impl Resource, druid: &DruidCluster, ) -> Result { - // TODO: Question: What does the connection string look like for druid? Online it says to connect - // to the Broker (or probably router) host. But we could maybe have multiple of those, - // How can we support multiple hosts? Is it fine to just provide the service domain? - // I tried that and it seems to have worked. Testing with multiple routers would be necessary. let conn_str = format!( "druid://{}:{}/druid/v2/sql", druid @@ -79,13 +70,7 @@ fn build_discovery_configmap( .with_context(|| NoServiceFqdn)?, DruidRole::Router.get_http_port() ); - /* - let mut conn_str = hosts - .into_iter() - .map(|(host, port)| format!("{}:{}", host.into(), port)) - .collect::>() - .join(","); - */ + ConfigMapBuilder::new() .metadata( ObjectMetaBuilder::new() @@ -108,14 +93,3 @@ fn build_discovery_configmap( .build() .context(BuildConfigMap) } - -/* -/// Lists all Pods FQDNs expected to host the [`ZookeeperCluster`] -fn pod_hosts(druid: &DruidCluster) -> Result + '_, Error> { - Ok(druid - .pods(&DruidRole::Router) - .context(ExpectedPods)? 
- .into_iter() - .map(|pod_ref| (pod_ref.fqdn(), DruidRole::Router.get_http_port()))) -} -*/ diff --git a/rust/operator-binary/src/druid_controller.rs b/rust/operator-binary/src/druid_controller.rs index 3a782233..66fc4f87 100644 --- a/rust/operator-binary/src/druid_controller.rs +++ b/rust/operator-binary/src/druid_controller.rs @@ -220,11 +220,11 @@ pub async fn reconcile_druid(druid: DruidCluster, ctx: Context) -> Result Date: Wed, 12 Jan 2022 11:15:13 +0100 Subject: [PATCH 06/14] Removed unused errors --- Cargo.lock | 1 - rust/crd/Cargo.toml | 1 - rust/operator-binary/src/discovery.rs | 23 +---------------------- 3 files changed, 1 insertion(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cb5ae604..eee1fb70 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1592,7 +1592,6 @@ dependencies = [ "serde", "serde_json", "serde_yaml", - "snafu", "stackable-operator", "strum", "strum_macros", diff --git a/rust/crd/Cargo.toml b/rust/crd/Cargo.toml index 411f7170..76a6cd3c 100644 --- a/rust/crd/Cargo.toml +++ b/rust/crd/Cargo.toml @@ -13,7 +13,6 @@ stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", semver = "1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -snafu = "0.6" strum = "0.23" strum_macros = "0.23" thiserror = "1.0" diff --git a/rust/operator-binary/src/discovery.rs b/rust/operator-binary/src/discovery.rs index 8868fc95..cb64ba23 100644 --- a/rust/operator-binary/src/discovery.rs +++ b/rust/operator-binary/src/discovery.rs @@ -2,13 +2,11 @@ //! inside a config map. We only provide a connection string to the router process, since it serves as //! a gateway to the cluster for client queries. -use std::num::TryFromIntError; - use snafu::{OptionExt, ResultExt, Snafu}; use stackable_druid_crd::{DruidCluster, DruidRole, APP_NAME}; use stackable_operator::{ builder::{ConfigMapBuilder, ObjectMetaBuilder}, - k8s_openapi::api::core::v1::{ConfigMap, Service}, + k8s_openapi::api::core::v1::ConfigMap, kube::{runtime::reflector::ObjectRef, Resource, ResourceExt}, }; @@ -21,25 +19,8 @@ pub enum Error { source: stackable_operator::error::Error, druid: ObjectRef, }, - #[snafu(display("chroot path {} was relative (must be absolute)", chroot))] - RelativeChroot { chroot: String }, - #[snafu(display("object has no name associated"))] - NoName, - #[snafu(display("object has no namespace associated"))] - NoNamespace, #[snafu(display("failed to get service FQDN"))] NoServiceFqdn, - #[snafu(display("could not find service port with name {}", port_name))] - NoServicePort { port_name: String }, - #[snafu(display("service port with name {} does not have a nodePort", port_name))] - NoNodePort { port_name: String }, - #[snafu(display("could not find Endpoints for {}", svc))] - FindEndpoints { - source: stackable_operator::error::Error, - svc: ObjectRef, - }, - #[snafu(display("nodePort was out of range"))] - InvalidNodePort { source: TryFromIntError }, #[snafu(display("failed to build ConfigMap"))] BuildConfigMap { source: stackable_operator::error::Error, @@ -56,8 +37,6 @@ pub async fn build_discovery_configmaps( } /// Build a discovery [`ConfigMap`] containing information about how to connect to a certain [`DruidCluster`] -/// -/// `hosts` will usually come from either [`pod_hosts`] or [`nodeport_hosts`]. 
fn build_discovery_configmap( name: &str, owner: &impl Resource, From 867bbc514b3e379467349d897f368d09bc06cad2 Mon Sep 17 00:00:00 2001 From: Felix Hennig Date: Wed, 12 Jan 2022 11:20:19 +0100 Subject: [PATCH 07/14] Added another test --- rust/crd/src/lib.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 59309d8b..e2130f59 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -470,5 +470,10 @@ mod tests { cluster.role_service_name(&DruidRole::Router), Some("testcluster-router".to_string()) ); + + assert_eq!( + cluster.role_service_fqdn(&DruidRole::Router), + Some("testcluster-router.default.svc.cluster.local".to_string()) + ) } } From 25c044a63e5daefb2fd4fa82ff4c1624d1cfcafc Mon Sep 17 00:00:00 2001 From: Felix Hennig Date: Wed, 12 Jan 2022 11:25:15 +0100 Subject: [PATCH 08/14] minor stuff --- rust/operator-binary/src/discovery.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/rust/operator-binary/src/discovery.rs b/rust/operator-binary/src/discovery.rs index cb64ba23..eca8a9ed 100644 --- a/rust/operator-binary/src/discovery.rs +++ b/rust/operator-binary/src/discovery.rs @@ -1,5 +1,5 @@ -//! Discovery for Druid. We make Druid discoverable by putting a connection string to the router process -//! inside a config map. We only provide a connection string to the router process, since it serves as +//! Discovery for Druid. We make Druid discoverable by putting a connection string to the router service +//! inside a config map. We only provide a connection string to the router service, since it serves as //! a gateway to the cluster for client queries. use snafu::{OptionExt, ResultExt, Snafu}; @@ -27,7 +27,7 @@ pub enum Error { }, } -/// Builds discovery [`ConfigMap`]s for connecting to a [`DruidCluster`] for all expected scenarios +/// Builds discovery [`ConfigMap`]s for connecting to a [`DruidCluster`] pub async fn build_discovery_configmaps( owner: &impl Resource, druid: &DruidCluster, From 32b2a027c044928d0f51d431ee7ae29b715949c1 Mon Sep 17 00:00:00 2001 From: Felix Hennig Date: Wed, 12 Jan 2022 11:27:56 +0100 Subject: [PATCH 09/14] Updated changelog --- CHANGELOG.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a3c20f5..698f9efd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,8 +4,17 @@ All notable changes to this project will be documented in this file. 
## [Unreleased] -## [0.2.0] - 2021-12-23 +### Changed + +- Fixed a port reference in the role services ([#102]) + +### Added +- Added the discovery ConfigMap creation ([#102]) + +[#102]: https://github.com/stackabletech/druid-operator/pull/102 + +## [0.2.0] - 2021-12-23 ### Changed From f5b0edf59797572a84b58645f5a95b3270ff938a Mon Sep 17 00:00:00 2001 From: Felix Hennig Date: Wed, 12 Jan 2022 12:10:03 +0100 Subject: [PATCH 10/14] Write more different things into the discovery CM --- rust/operator-binary/src/discovery.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/rust/operator-binary/src/discovery.rs b/rust/operator-binary/src/discovery.rs index eca8a9ed..9023144b 100644 --- a/rust/operator-binary/src/discovery.rs +++ b/rust/operator-binary/src/discovery.rs @@ -42,13 +42,18 @@ fn build_discovery_configmap( owner: &impl Resource, druid: &DruidCluster, ) -> Result { - let conn_str = format!( - "druid://{}:{}/druid/v2/sql", + let router_host = format!( + "{}:{}", druid .role_service_fqdn(&DruidRole::Router) .with_context(|| NoServiceFqdn)?, DruidRole::Router.get_http_port() ); + let sqalchemy_conn_str = format!("druid://{}/druid/v2/sql", router_host); + let avatica_conn_str = format!( + "jdbc:avatica:remote:url=http://{}/druid/v2/sql/avatica/", + router_host + ); ConfigMapBuilder::new() .metadata( @@ -68,7 +73,9 @@ fn build_discovery_configmap( ) .build(), ) - .add_data("DRUID", conn_str) + .add_data("DRUID_ROUTER", router_host) + .add_data("DRUID_SQALCHEMY", sqalchemy_conn_str) + .add_data("DRUID_AVATICA_JDBC", avatica_conn_str) .build() .context(BuildConfigMap) } From 5d262ccd766ca0d7378a7b8821a5d5bd5b764bac Mon Sep 17 00:00:00 2001 From: Felix Hennig Date: Wed, 12 Jan 2022 13:37:01 +0100 Subject: [PATCH 11/14] Updated documentation --- docs/modules/ROOT/pages/usage.adoc | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/docs/modules/ROOT/pages/usage.adoc b/docs/modules/ROOT/pages/usage.adoc index ff14b09e..053a0bb6 100644 --- a/docs/modules/ROOT/pages/usage.adoc +++ b/docs/modules/ROOT/pages/usage.adoc @@ -37,7 +37,7 @@ Then a cluster can be deployed using the example below. Make sure you have *exac apiVersion: druid.stackable.tech/v1alpha1 kind: DruidCluster metadata: - name: simple + name: simple-druid spec: version: 0.22.0 zookeeperReference: simple-zk @@ -79,7 +79,7 @@ spec: config: {} replicas: 1 -The Router is hosting the web UI, a `NodePort` service is created by the operator to access the web UI. Connect to the `simple-router` `NodePort` service and follow the https://druid.apache.org/docs/latest/tutorials/index.html#step-4-load-data[druid documentation] on how to load and query sample data. +The Router is hosting the web UI, a `NodePort` service is created by the operator to access the web UI. Connect to the `simple-druid-router` `NodePort` service and follow the https://druid.apache.org/docs/latest/tutorials/index.html#step-4-load-data[druid documentation] on how to load and query sample data. == Using S3 @@ -106,4 +106,12 @@ This allows to ingest data from accessible buckets already. To configure a bucke deepStorage: storageType: s3 bucket: druid-deepstorage - baseKey: storage # the base key is the prefix to be used; optional \ No newline at end of file + baseKey: storage # the base key is the prefix to be used; optional + +== Connecting to Druid from other Services + +The operator creates a `ConfigMap` with the name of the cluster which contains connection information. 
Following our example above (the name of the cluster is `simple-druid`) a `ConfigMap` with the name `simple-druid` will be created containing 3 keys: + +- `DRUID_ROUTER` with the format `:`, which points to the router processes HTTP endpoint. Here you can connect to the web UI, or use REST endpoints such as `/druid/v2/sql/` to query data. https://druid.apache.org/docs/latest/querying/sql.html#http-post[More information in the Druid Docs]. +- `DRUID_AVATICA_JDBC` contains a JDBC connect string which can be used together with the https://calcite.apache.org/avatica/downloads/[Avatica JDBC Driver] to connect to Druid and query data. https://druid.apache.org/docs/latest/querying/sql.html#jdbc[More information in the Druid Docs]. +- `DRUID_SQALCHEMY` contains a connection string used to connect to Druid with SQAlchemy, in - for example - Apache Superset. From fcf9fd920718078377aa37893f5c072cedad5f38 Mon Sep 17 00:00:00 2001 From: Felix Hennig Date: Thu, 13 Jan 2022 13:38:06 +0100 Subject: [PATCH 12/14] Fixed typo --- rust/operator-binary/src/discovery.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rust/operator-binary/src/discovery.rs b/rust/operator-binary/src/discovery.rs index 9023144b..f16647a7 100644 --- a/rust/operator-binary/src/discovery.rs +++ b/rust/operator-binary/src/discovery.rs @@ -49,7 +49,7 @@ fn build_discovery_configmap( .with_context(|| NoServiceFqdn)?, DruidRole::Router.get_http_port() ); - let sqalchemy_conn_str = format!("druid://{}/druid/v2/sql", router_host); + let sqlalchemy_conn_str = format!("druid://{}/druid/v2/sql", router_host); let avatica_conn_str = format!( "jdbc:avatica:remote:url=http://{}/druid/v2/sql/avatica/", router_host @@ -74,7 +74,7 @@ fn build_discovery_configmap( .build(), ) .add_data("DRUID_ROUTER", router_host) - .add_data("DRUID_SQALCHEMY", sqalchemy_conn_str) + .add_data("DRUID_SQLALCHEMY", sqlalchemy_conn_str) .add_data("DRUID_AVATICA_JDBC", avatica_conn_str) .build() .context(BuildConfigMap) From 440bb2e574e3f11a352d22c61803f0385a429462 Mon Sep 17 00:00:00 2001 From: Felix Hennig Date: Fri, 14 Jan 2022 12:06:00 +0100 Subject: [PATCH 13/14] clippy fixes --- rust/crd/src/lib.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index e2130f59..5bc8e6c4 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -160,11 +160,7 @@ impl DruidCluster { /// The name of the role-level load-balanced Kubernetes `Service` pub fn role_service_name(&self, role: &DruidRole) -> Option { - Some(format!( - "{}-{}", - self.metadata.name.clone()?, - role.to_string() - )) + Some(format!("{}-{}", self.metadata.name.clone()?, role)) } /// The fully-qualified domain name of the role-level load-balanced Kubernetes `Service` From 3e06afd5110a004639e08f8c57fc61308626c5ea Mon Sep 17 00:00:00 2001 From: Felix Hennig Date: Fri, 14 Jan 2022 14:00:42 +0100 Subject: [PATCH 14/14] clippy fixes --- rust/operator-binary/src/druid_controller.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust/operator-binary/src/druid_controller.rs b/rust/operator-binary/src/druid_controller.rs index 943170f9..f5f79962 100644 --- a/rust/operator-binary/src/druid_controller.rs +++ b/rust/operator-binary/src/druid_controller.rs @@ -571,7 +571,7 @@ fn container_image(version: &str) -> String { // image during restarts depending on the imagePullPolicy. 
// TODO: should be made configurable "docker.stackable.tech/stackable/druid:{}-stackable{}", - version.to_string(), + version, DEFAULT_IMAGE_VERSION ) }
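
For reference, a sketch of the discovery ConfigMap that `build_discovery_configmaps` ends up writing after this series, assuming the `simple-druid` example cluster from usage.adoc deployed in the `default` namespace. The router HTTP port (shown as 8888, Druid's default) and the omitted label set are illustrative only; the real values come from `DruidRole::get_http_port()` and `with_recommended_labels`:

apiVersion: v1
kind: ConfigMap
metadata:
  name: simple-druid        # named after the DruidCluster (owner.name())
  namespace: default
  # ownerReferences and the recommended app.kubernetes.io/* labels are added
  # by ObjectMetaBuilder in build_discovery_configmap
data:
  DRUID_ROUTER: "simple-druid-router.default.svc.cluster.local:8888"
  DRUID_SQLALCHEMY: "druid://simple-druid-router.default.svc.cluster.local:8888/druid/v2/sql"
  DRUID_AVATICA_JDBC: "jdbc:avatica:remote:url=http://simple-druid-router.default.svc.cluster.local:8888/druid/v2/sql/avatica/"

A client such as Apache Superset can then read `DRUID_SQLALCHEMY` from this ConfigMap and use it directly as its SQLAlchemy connection URI, as described in the usage documentation added in patch 11.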