diff --git a/solo/README.md b/solo/README.md
index f2b116d1d..22aeffcaa 100644
--- a/solo/README.md
+++ b/solo/README.md
@@ -38,7 +38,7 @@ $ kubectx
 ```
 * For a local cluster, you may use [kind](https://kind.sigs.k8s.io/) and [kubectl](https://kubernetes.io/docs/tasks/tools/) to create a cluster and namespace as below.
-  * In this case, ensure your Docker has enough resources (e.g. Memory >=8Gb, CPU: >=4).
+  * In this case, ensure your Docker engine has enough resources (e.g. Memory >= 8 GB, CPU >= 4).
 
 ```
 $ export SOLO_CLUSTER_NAME=solo
@@ -150,6 +150,7 @@ hedera-node0.key hedera-node1.key hedera-node2.key private-node1.pfx public.p
 ```
 
 * Set up cluster with shared components
+  * In a separate terminal, you may run `k9s` to view the pod status.
 
 ```
 $ solo cluster setup
@@ -166,8 +167,6 @@ Kubernetes Namespace : solo
 
 ```
 
-In a separate terminal, you may run `k9s` to view the pod status.
-
 * Deploy helm chart with Hedera network components
   * It may take a while (5~15 minutes depending on your internet speed) to download various docker images and get the pods started.
   * If it fails, ensure you have enough resources allocated for Docker and restart.
@@ -181,8 +180,6 @@ Kubernetes Context : kind-solo
 Kubernetes Cluster : kind-solo
 Kubernetes Namespace : solo
 **********************************************************************************
-(node:76336) [DEP0040] DeprecationWarning: The `punycode` module is deprecated. Please use a userland alternative instead.
-(Use `node --trace-deprecation ...` to show where the warning was created)
 ✔ Initialize
 ✔ Install chart 'fullstack-deployment' [3s]
 ✔ Waiting for network pods to be ready [8m54s]
@@ -204,8 +201,6 @@ Kubernetes Context : kind-solo
 Kubernetes Cluster : kind-solo
 Kubernetes Namespace : solo
 **********************************************************************************
-(node:78205) [DEP0040] DeprecationWarning: The `punycode` module is deprecated. Please use a userland alternative instead.
-(Use `node --trace-deprecation ...` to show where the warning was created)
 ✔ Initialize
 ✔ Identify network pods
 ✔ Check network pod: node0
@@ -315,9 +310,9 @@ You may view the list of pods using `k9s` as below:
 
 Once the nodes are up, you may now expose various services (using `k9s` (shift-f) or `kubectl port-forward`) and access them. Below are the most commonly used services that you may expose.
-* Node services: Prefix `network-<node name>-svc`
+* Node services: `network-<node name>-svc`
 * HAProxy: `haproxy-<node name>-svc`
-* EnvoyProxy: `envoy-proxy-<node name>-svc`
+* Envoy Proxy: `envoy-proxy-<node name>-svc`
 * Hedera explorer: `fullstack-deployment-hedera-explorer`
 
 ```
@@ -360,7 +355,7 @@ $ solo init -t v0.47.0-alpha.0 -i n0,n1,n2 -n "${SOLO_NAMESPACE}" -s "${SOLO_CLU
 # output is similar to example-1
 ```
 
-* Generate `pem` node keys for default node IDs: node0,node1,node2
+* Generate `pem`-formatted node keys
 
 ```
 $ solo node keys --gossip-keys --tls-keys --key-format pem
diff --git a/solo/src/commands/cluster.mjs b/solo/src/commands/cluster.mjs
index 79e35fb3e..8fcfc2e75 100644
--- a/solo/src/commands/cluster.mjs
+++ b/solo/src/commands/cluster.mjs
@@ -191,7 +191,9 @@ export class ClusterCommand extends BaseCommand {
         task: async (ctx, _) => {
           const namespace = ctx.config.clusterSetupNamespace
           await self.chartManager.uninstall(namespace, constants.FULLSTACK_CLUSTER_SETUP_CHART)
-          await self.showInstalledChartList(namespace)
+          if (argv.dev) {
+            await self.showInstalledChartList(namespace)
+          }
         },
         skip: (ctx, _) => !ctx.isChartInstalled
       }
diff --git a/solo/src/commands/init.mjs b/solo/src/commands/init.mjs
index eaa15564e..d59cc0b4f 100644
--- a/solo/src/commands/init.mjs
+++ b/solo/src/commands/init.mjs
@@ -68,12 +68,6 @@ export class InitCommand extends BaseCommand {
           ctx.dirs = this.setupHomeDirectory()
         }
       },
-      {
-        title: 'Setup config manager',
-        task: async (ctx, _) => {
-          this.configManager.load(argv, true)
-        }
-      },
       {
         title: 'Check dependencies',
         task: async (_, task) => {
diff --git a/solo/src/core/chart_manager.mjs b/solo/src/core/chart_manager.mjs
index 8daea59c3..570b7f916 100644
--- a/solo/src/core/chart_manager.mjs
+++ b/solo/src/core/chart_manager.mjs
@@ -98,6 +98,7 @@ export class ChartManager {
   }
 
   async isChartInstalled (namespaceName, chartName) {
+    this.logger.debug(`> checking if chart is installed [ chart: ${chartName}, namespace: ${namespaceName} ]`)
     const charts = await this.getInstalledCharts(namespaceName)
     for (const item of charts) {
       if (item.startsWith(chartName)) {
@@ -110,7 +111,6 @@ export class ChartManager {
 
   async uninstall (namespaceName, chartName) {
     try {
-      this.logger.debug(`> checking chart release: ${chartName}`)
       const isInstalled = await this.isChartInstalled(namespaceName, chartName)
       if (isInstalled) {
         this.logger.debug(`uninstalling chart release: ${chartName}`)
diff --git a/solo/src/core/config_manager.mjs b/solo/src/core/config_manager.mjs
index 8b20ba45f..7b2f9e78a 100644
--- a/solo/src/core/config_manager.mjs
+++ b/solo/src/core/config_manager.mjs
@@ -163,6 +163,15 @@ export class ConfigManager {
     }
   }
 
+  /**
+   * Check if a flag value is set
+   * @param flag flag object
+   * @return {boolean}
+   */
+  hasFlag (flag) {
+    return this.config.flags[flag.name] !== undefined
+  }
+
   /**
    * Return the value of the given flag
    *
diff --git a/solo/src/index.mjs b/solo/src/index.mjs
index d88040aca..e435c80e2 100644
--- a/solo/src/index.mjs
+++ b/solo/src/index.mjs
@@ -50,19 +50,6 @@ export function main (argv) {
   const kubeConfig = k8.getKubeConfig()
   const context = kubeConfig.getContextObject(kubeConfig.getCurrentContext())
   const cluster = kubeConfig.getCurrentCluster()
-  const config = configManager.load()
-  configManager.setFlag(flags.clusterName, cluster.name)
-  if (context.namespace) {
-    configManager.setFlag(flags.namespace, context.namespace)
-  }
-  configManager.persist()
-
-  logger.showUser(chalk.cyan('\n******************************* Solo *********************************************'))
-  logger.showUser(chalk.cyan('Version\t\t\t:'), chalk.yellow(configManager.getVersion()))
-  logger.showUser(chalk.cyan('Kubernetes Context\t:'), chalk.yellow(context.name))
-  logger.showUser(chalk.cyan('Kubernetes Cluster\t:'), chalk.yellow(configManager.getFlag(flags.clusterName)))
-  logger.showUser(chalk.cyan('Kubernetes Namespace\t:'), chalk.yellow(configManager.getFlag(flags.namespace)))
-  logger.showUser(chalk.cyan('**********************************************************************************'))
 
   const opts = {
     logger,
@@ -76,20 +63,48 @@ export function main (argv) {
     keyManager
   }
 
-  const processArguments = (args, yargs) => {
+  const processArguments = (argv, yargs) => {
+    if (argv._[0] === 'init') {
+      configManager.load({}, true) // reset cached config
+    } else {
+      configManager.load()
+    }
+
+    // load the cluster name and namespace from the current Kubernetes context
+    configManager.setFlag(flags.clusterName, cluster.name)
+    if (context.namespace) {
+      // this will be overwritten if the user has passed the --namespace flag
+      configManager.setFlag(flags.namespace, context.namespace)
+    }
+
     for (const key of Object.keys(yargs.parsed.aliases)) {
       const flag = flags.allFlagsMap.get(key)
       if (flag) {
-        if (args[key]) {
-          // argv takes precedence
-        } else if (config.flags[key]) {
-          args[key] = config.flags[key]
-        } else if (args._[0] !== 'init') {
-          args[key] = flag.definition.defaultValue
+        if (argv[key] !== undefined) {
+          // argv takes precedence, nothing to do
+        } else if (configManager.hasFlag(flag)) {
+          argv[key] = configManager.getFlag(flag)
+        } else if (argv._[0] !== 'init') {
+          argv[key] = flag.definition.defaultValue
         }
       }
     }
-    return args
+
+    // Update the config manager and persist the config.
+    // Note: because of this centralized loading, we don't strictly need to load argv into configManager again in
+    // the command execution handlers. However, the handlers still load argv to facilitate testing via argv injection.
+    configManager.load(argv)
+    configManager.persist()
+
+    logger.showUser(chalk.cyan('\n******************************* Solo *********************************************'))
+    logger.showUser(chalk.cyan('Version\t\t\t:'), chalk.yellow(configManager.getVersion()))
+    logger.showUser(chalk.cyan('Kubernetes Context\t:'), chalk.yellow(context.name))
+    logger.showUser(chalk.cyan('Kubernetes Cluster\t:'), chalk.yellow(configManager.getFlag(flags.clusterName)))
+    logger.showUser(chalk.cyan('Kubernetes Namespace\t:'), chalk.yellow(configManager.getFlag(flags.namespace)))
+    logger.showUser(chalk.cyan('**********************************************************************************'))
+
+    return argv
   }
 
   return yargs(hideBin(argv))
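
The `processArguments` rework above centralizes flag resolution: a value passed on the command line wins, then a value cached by the config manager, then the flag's default (except for `init`, which resets the cache first). The sketch below illustrates just that precedence rule in isolation; `resolveFlags` and the simplified `flags`/`configManager` objects are illustrative stand-ins, not part of the Solo codebase.

```js
// Minimal, self-contained sketch of the precedence rule in processArguments.
// Assumption: `flags` and `configManager` here are simplified stand-ins for
// the real Solo objects; only the resolution order is modeled.
const flags = new Map([
  ['namespace', { definition: { defaultValue: 'default' } }],
  ['clusterName', { definition: { defaultValue: 'kind-solo' } }]
])

const configManager = {
  cache: { namespace: 'solo' }, // values persisted by an earlier run
  hasFlag (name) { return this.cache[name] !== undefined },
  getFlag (name) { return this.cache[name] }
}

function resolveFlags (argv) {
  for (const [name, flag] of flags) {
    if (argv[name] !== undefined) continue // 1. argv takes precedence
    if (configManager.hasFlag(name)) {
      argv[name] = configManager.getFlag(name) // 2. fall back to the cached config
    } else if (argv._[0] !== 'init') {
      argv[name] = flag.definition.defaultValue // 3. finally, the flag default
    }
  }
  return argv
}

// e.g. `solo deploy --clusterName my-cluster` resolves the missing namespace from the cache:
console.log(resolveFlags({ _: ['deploy'], clusterName: 'my-cluster' }))
// => { _: [ 'deploy' ], clusterName: 'my-cluster', namespace: 'solo' }
```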