fix(cli): solo init should reset cached config (#737)
Signed-off-by: Lenin Mehedy <[email protected]>
Signed-off-by: Jeromy Cannon <[email protected]>
Co-authored-by: Jeromy Cannon <[email protected]>
leninmehedy and jeromy-cannon authored Feb 7, 2024
1 parent 72982a5 commit 911d25b
Showing 6 changed files with 54 additions and 39 deletions.
15 changes: 5 additions & 10 deletions solo/README.md
@@ -38,7 +38,7 @@ $ kubectx <context-name>
```

* For a local cluster, you may use [kind](https://kind.sigs.k8s.io/) and [kubectl](https://kubernetes.io/docs/tasks/tools/) to create a cluster and namespace as below.
* In this case, ensure your Docker has enough resources (e.g. Memory >=8Gb, CPU: >=4).
* In this case, ensure your Docker engine has enough resources (e.g. Memory >=8Gb, CPU: >=4).

```
$ export SOLO_CLUSTER_NAME=solo
@@ -150,6 +150,7 @@ hedera-node0.key hedera-node1.key hedera-node2.key private-node1.pfx public.p
```

* Setup cluster with shared components
* In a separate terminal, you may run `k9s` to view the pod status.

```
$ solo cluster setup
@@ -166,8 +167,6 @@ Kubernetes Namespace : solo
```

In a separate terminal, you may run `k9s` to view the pod status.

* Deploy helm chart with Hedera network components
* It may take a while (5~15 minutes depending on your internet speed) to download various docker images and get the pods started.
* If it fails, ensure you have enough resources allocated for Docker and restart.
@@ -181,8 +180,6 @@ Kubernetes Context : kind-solo
Kubernetes Cluster : kind-solo
Kubernetes Namespace : solo
**********************************************************************************
(node:76336) [DEP0040] DeprecationWarning: The `punycode` module is deprecated. Please use a userland alternative instead.
(Use `node --trace-deprecation ...` to show where the warning was created)
✔ Initialize
✔ Install chart 'fullstack-deployment' [3s]
✔ Waiting for network pods to be ready [8m54s]
@@ -204,8 +201,6 @@ Kubernetes Context : kind-solo
Kubernetes Cluster : kind-solo
Kubernetes Namespace : solo
**********************************************************************************
(node:78205) [DEP0040] DeprecationWarning: The `punycode` module is deprecated. Please use a userland alternative instead.
(Use `node --trace-deprecation ...` to show where the warning was created)
✔ Initialize
✔ Identify network pods
✔ Check network pod: node0
@@ -315,9 +310,9 @@ You may view the list of pods using `k9s` as below:

Once the nodes are up, you may expose various services (using `k9s` (shift-f) or `kubectl port-forward`) and access them. Below are the most commonly used services that you may expose.

* Node services: Prefix `network-<node ID>-svc`
* Node services: `network-<node ID>-svc`
* HAProxy: `haproxy-<node ID>-svc`
* EnvoyProxy: `envoy-proxy-<node ID>-svc`
* Envoy Proxy: `envoy-proxy-<node ID>-svc`
* Hedera explorer: `fullstack-deployment-hedera-explorer`

```
@@ -360,7 +355,7 @@ $ solo init -t v0.47.0-alpha.0 -i n0,n1,n2 -n "${SOLO_NAMESPACE}" -s "${SOLO_CLU
# output is similar as example-1
```

* Generate `pem` node keys for default node IDs: node0,node1,node2
* Generate `pem` formatted node keys

```
$ solo node keys --gossip-keys --tls-keys --key-format pem
4 changes: 3 additions & 1 deletion solo/src/commands/cluster.mjs
@@ -191,7 +191,9 @@ export class ClusterCommand extends BaseCommand {
task: async (ctx, _) => {
const namespace = ctx.config.clusterSetupNamespace
await self.chartManager.uninstall(namespace, constants.FULLSTACK_CLUSTER_SETUP_CHART)
await self.showInstalledChartList(namespace)
if (argv.dev) {
await self.showInstalledChartList(namespace)
}
},
skip: (ctx, _) => !ctx.isChartInstalled
}
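
With this change, the installed-chart listing is printed only when `--dev` is passed, keeping normal teardown output quiet. The body of `showInstalledChartList` is not part of this diff; below is a minimal sketch of what such a helper might look like, assuming the `getInstalledCharts` method visible in chart_manager.mjs below and the `logger.showUser` method used in index.mjs.

```js
// Hypothetical sketch -- not part of this diff. Assumes getInstalledCharts()
// (shown in chart_manager.mjs below) and logger.showUser() (used in index.mjs).
async showInstalledChartList (namespaceName) {
  const charts = await this.chartManager.getInstalledCharts(namespaceName)
  this.logger.showUser(`Installed charts in namespace '${namespaceName}':`)
  for (const chart of charts) {
    this.logger.showUser(`  - ${chart}`)
  }
}
```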
6 changes: 0 additions & 6 deletions solo/src/commands/init.mjs
@@ -68,12 +68,6 @@ export class InitCommand extends BaseCommand {
ctx.dirs = this.setupHomeDirectory()
}
},
{
title: 'Setup config manager',
task: async (ctx, _) => {
this.configManager.load(argv, true)
}
},
{
title: 'Check dependencies',
task: async (_, task) => {
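
Dropping the 'Setup config manager' task means `init` no longer reloads argv inside the command itself; config loading is now centralized in `processArguments` in index.mjs (see below). A minimal sketch of the `load` semantics implied by the calls in this commit, with the second parameter assumed to be a reset flag:

```js
// Semantics implied by the calls in this commit (second parameter assumed to
// be a reset flag): a truthy reset clears cached flags before merging argv.
configManager.load({}, true) // `solo init`: start from a freshly reset cache
configManager.load(argv)     // other commands: merge argv over the cached config
```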
2 changes: 1 addition & 1 deletion solo/src/core/chart_manager.mjs
@@ -98,6 +98,7 @@ export class ChartManager {
}

async isChartInstalled (namespaceName, chartName) {
this.logger.debug(`> checking if chart is installed [ chart: ${chartName}, namespace: ${namespaceName} ]`)
const charts = await this.getInstalledCharts(namespaceName)
for (const item of charts) {
if (item.startsWith(chartName)) {
@@ -110,7 +111,6 @@

async uninstall (namespaceName, chartName) {
try {
this.logger.debug(`> checking chart release: ${chartName}`)
const isInstalled = await this.isChartInstalled(namespaceName, chartName)
if (isInstalled) {
this.logger.debug(`uninstalling chart release: ${chartName}`)
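
Note that `isChartInstalled` matches by prefix (`item.startsWith(chartName)`), so a release whose name begins with the chart name is detected. A usage sketch based on the calls visible in cluster.mjs above; the namespace value is illustrative:

```js
// Usage sketch (namespace value illustrative). uninstall() re-checks
// installation internally, so the explicit check is only needed when the
// caller wants to branch, as ClusterCommand does via ctx.isChartInstalled.
const namespace = 'solo-cluster'
if (await chartManager.isChartInstalled(namespace, constants.FULLSTACK_CLUSTER_SETUP_CHART)) {
  await chartManager.uninstall(namespace, constants.FULLSTACK_CLUSTER_SETUP_CHART)
}
```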
9 changes: 9 additions & 0 deletions solo/src/core/config_manager.mjs
@@ -163,6 +163,15 @@ export class ConfigManager {
}
}

/**
* Check if a flag value is set
* @param flag flag object
* @return {boolean}
*/
hasFlag (flag) {
return this.config.flags[flag.name] !== undefined
}

/**
* Return the value of the given flag
*
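
The new `hasFlag` lets callers distinguish a flag that was never cached from one cached with a falsy value, something the old truthiness check in index.mjs (`config.flags[key]`) could not do. A sketch mirroring the new precedence logic in index.mjs below:

```js
// Sketch mirroring the check in index.mjs below: a cached value is applied
// only when the flag was actually set, even if that value is falsy.
const flag = flags.allFlagsMap.get('namespace') // lookup as done in index.mjs
if (flag && argv[flag.name] === undefined && configManager.hasFlag(flag)) {
  argv[flag.name] = configManager.getFlag(flag)
}
```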
57 changes: 36 additions & 21 deletions solo/src/index.mjs
@@ -50,19 +50,6 @@ export function main (argv) {
const kubeConfig = k8.getKubeConfig()
const context = kubeConfig.getContextObject(kubeConfig.getCurrentContext())
const cluster = kubeConfig.getCurrentCluster()
const config = configManager.load()
configManager.setFlag(flags.clusterName, cluster.name)
if (context.namespace) {
configManager.setFlag(flags.namespace, context.namespace)
}
configManager.persist()

logger.showUser(chalk.cyan('\n******************************* Solo *********************************************'))
logger.showUser(chalk.cyan('Version\t\t\t:'), chalk.yellow(configManager.getVersion()))
logger.showUser(chalk.cyan('Kubernetes Context\t:'), chalk.yellow(context.name))
logger.showUser(chalk.cyan('Kubernetes Cluster\t:'), chalk.yellow(configManager.getFlag(flags.clusterName)))
logger.showUser(chalk.cyan('Kubernetes Namespace\t:'), chalk.yellow(configManager.getFlag(flags.namespace)))
logger.showUser(chalk.cyan('**********************************************************************************'))

const opts = {
logger,
@@ -76,20 +63,48 @@ export function main (argv) {
keyManager
}

const processArguments = (args, yargs) => {
const processArguments = (argv, yargs) => {
if (argv._[0] === 'init') {
configManager.load({}, true) // reset cached config
} else {
configManager.load()
}

// load cluster name and namespace from kubernetes context
configManager.setFlag(flags.clusterName, cluster.name)
if (context.namespace) {
// this will be overwritten if user has passed --namespace flag
configManager.setFlag(flags.namespace, context.namespace)
}

for (const key of Object.keys(yargs.parsed.aliases)) {
const flag = flags.allFlagsMap.get(key)
if (flag) {
if (args[key]) {
// argv takes precedence
} else if (config.flags[key]) {
args[key] = config.flags[key]
} else if (args._[0] !== 'init') {
args[key] = flag.definition.defaultValue
if (argv[key] !== undefined) {
// argv takes precedence, nothing to do
} else if (configManager.hasFlag(flag)) {
argv[key] = configManager.getFlag(flag)
} else if (argv._[0] !== 'init') {
argv[key] = flag.definition.defaultValue
}
}
}
return args

// Update config manager and persist the config.
// Note: Because of this centralized loading, we really don't need to load argv in configManager later in
// the command execution handlers. However, we are loading argv again in the command handlers to facilitate testing
// with argv injection into the command handlers.
configManager.load(argv)
configManager.persist()

logger.showUser(chalk.cyan('\n******************************* Solo *********************************************'))
logger.showUser(chalk.cyan('Version\t\t\t:'), chalk.yellow(configManager.getVersion()))
logger.showUser(chalk.cyan('Kubernetes Context\t:'), chalk.yellow(context.name))
logger.showUser(chalk.cyan('Kubernetes Cluster\t:'), chalk.yellow(configManager.getFlag(flags.clusterName)))
logger.showUser(chalk.cyan('Kubernetes Namespace\t:'), chalk.yellow(configManager.getFlag(flags.namespace)))
logger.showUser(chalk.cyan('**********************************************************************************'))

return argv
}

return yargs(hideBin(argv))
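
The rewritten `processArguments` centralizes flag resolution with a clear precedence: an explicit CLI value wins, then any cached value, then the flag's default, except for `init`, which resets the cache and deliberately skips defaults. A minimal standalone sketch of that rule (names illustrative, not the actual Solo API):

```js
// Minimal sketch of the precedence rule implemented above (names
// illustrative): CLI value > cached value > default, with `init` skipping
// defaults because it starts from a freshly reset cache.
function resolveFlag (command, cliValue, cachedValue, defaultValue) {
  if (cliValue !== undefined) return cliValue       // argv takes precedence
  if (cachedValue !== undefined) return cachedValue // fall back to cached config
  if (command !== 'init') return defaultValue       // defaults skipped on init
  return undefined
}

// e.g. resolveFlag('deploy', undefined, 'solo-ns', 'default') === 'solo-ns'
```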
