fix: ability to load configuration from stdin
fixes: [BUG] Honor stdin for config k3d cluster create -c k3d-io#1123
kameshsampath committed Aug 14, 2022
1 parent 017f1da commit 0fee1a2
Showing 3 changed files with 189 additions and 4 deletions.
25 changes: 21 additions & 4 deletions cmd/util/config/config.go
@@ -23,6 +23,7 @@ package config

import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -31,6 +32,7 @@ import (
"github.com/spf13/viper"
"sigs.k8s.io/yaml"

"github.com/k3d-io/k3d/v5/cmd/util"
"github.com/k3d-io/k3d/v5/pkg/config"
l "github.com/k3d-io/k3d/v5/pkg/logger"
)
@@ -46,7 +48,12 @@ func InitViperWithConfigFile(cfgViper *viper.Viper, configFile string) error {
// Set config file, if specified
if configFile != "" {

if _, err := os.Stat(configFile); err != nil {
streams := util.StandardIOStreams()
// flag to mark whether the config is read from stdin
fromStdIn := false
if configFile == "-" {
fromStdIn = true
} else if _, err := os.Stat(configFile); err != nil {
l.Log().Fatalf("Failed to stat config file %s: %+v", configFile, err)
}

@@ -58,10 +65,20 @@ func InitViperWithConfigFile(cfgViper *viper.Viper, configFile string) error {
}
defer tmpfile.Close()

originalcontent, err := os.ReadFile(configFile)
if err != nil {
l.Log().Fatalf("error reading config file %s: %v", configFile, err)
var originalcontent []byte
if fromStdIn {
// read the config from stdin
originalcontent, err = ioutil.ReadAll(streams.In)
if err != nil {
l.Log().Fatalf("Failed to read config file from stdin: %+v", configFile, err)
}
} else {
originalcontent, err = os.ReadFile(configFile)
if err != nil {
l.Log().Fatalf("error reading config file %s: %v", configFile, err)
}
}

expandedcontent := os.ExpandEnv(string(originalcontent))
if _, err := tmpfile.WriteString(expandedcontent); err != nil {
l.Log().Fatalf("error writing expanded config file contents to temp file %s: %v", tmpfile.Name(), err)
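For reference, the core pattern this change introduces — treating a configFile value of "-" as "read the configuration from stdin" — boils down to the following minimal, self-contained sketch (illustrative only, not k3d code; readConfigBytes is a hypothetical helper name):

package main

import (
	"fmt"
	"io"
	"os"
)

// readConfigBytes mirrors the branching above: "-" selects stdin,
// anything else is treated as a path on disk.
func readConfigBytes(path string, stdin io.Reader) ([]byte, error) {
	if path == "-" {
		return io.ReadAll(stdin)
	}
	return os.ReadFile(path)
}

func main() {
	// e.g. invoked as: cat config.yaml | ./prog -
	content, err := readConfigBytes("-", os.Stdin)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to read config: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("read %d bytes of configuration\n", len(content))
}

In the actual change, the bytes obtained either way are then env-expanded and written to a temp file that viper reads, so the rest of the config loading path stays unchanged.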
28 changes: 28 additions & 0 deletions cmd/util/iostreams.go
@@ -0,0 +1,28 @@
package util

import (
	"io"
	"os"
)

// IOStreams provides the standard names for iostreams.
// This is useful for embedding and for unit testing.
// Inconsistent and different names make it hard to read and review code
// This is based on https://github.com/kubernetes-sigs/kind/blob/main/pkg/cmd/iostreams.go, but just the nice type without the dependency
type IOStreams struct {
	// In think, os.Stdin
	In io.Reader
	// Out think, os.Stdout
	Out io.Writer
	// ErrOut think, os.Stderr
	ErrOut io.Writer
}

// StandardIOStreams returns an IOStreams backed by os.Stdin, os.Stdout and os.Stderr
func StandardIOStreams() IOStreams {
	return IOStreams{
		In:     os.Stdin,
		Out:    os.Stdout,
		ErrOut: os.Stderr,
	}
}
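Because In, Out, and ErrOut are io.Reader/io.Writer interfaces rather than *os.File, tests can swap in in-memory streams. A minimal sketch of how a unit test might inject configuration through In (a hypothetical test, not part of this commit):

package util_test

import (
	"bytes"
	"io"
	"strings"
	"testing"

	"github.com/k3d-io/k3d/v5/cmd/util"
)

func TestIOStreamsInjection(t *testing.T) {
	// Stand-in buffers take the place of the real process streams.
	streams := util.IOStreams{
		In:     strings.NewReader("apiVersion: k3d.io/v1alpha4\nkind: Simple\n"),
		Out:    &bytes.Buffer{},
		ErrOut: &bytes.Buffer{},
	}

	// Code under test only sees the interfaces, so it cannot tell
	// these buffers apart from os.Stdin/os.Stdout/os.Stderr.
	content, err := io.ReadAll(streams.In)
	if err != nil {
		t.Fatalf("unexpected error reading from injected stdin: %v", err)
	}
	if !strings.Contains(string(content), "kind: Simple") {
		t.Errorf("expected injected config content, got %q", string(content))
	}
}

This is the property the commit relies on in config.go: the stdin-reading branch only needs streams.In to satisfy io.Reader.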
140 changes: 140 additions & 0 deletions tests/test_config_file_from_stdin.sh
@@ -0,0 +1,140 @@
#!/bin/bash

CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
[ -d "$CURR_DIR" ] || { echo "FATAL: no current dir (maybe running in zsh?)"; exit 1; }

# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"

### Step Setup ###
# Redirect all stdout/stderr output to logfile
LOG_FILE="$TEST_OUTPUT_DIR/$( basename "${BASH_SOURCE[0]}" ).log"
exec >${LOG_FILE} 2>&1
export LOG_FILE

# use a kubeconfig file specific to this test
KUBECONFIG="$KUBECONFIG_ROOT/$( basename "${BASH_SOURCE[0]}" ).yaml"
export KUBECONFIG
### Step Setup ###


: "${EXTRA_FLAG:=""}"
: "${EXTRA_TITLE:=""}"

if [[ -n "$K3S_IMAGE" ]]; then
EXTRA_FLAG="--image rancher/k3s:$K3S_IMAGE"
EXTRA_TITLE="(rancher/k3s:$K3S_IMAGE)"
fi

export CURRENT_STAGE="Test | config-file | $K3S_IMAGE"

configfileoriginal="$CURR_DIR/assets/config_test_simple.yaml"
configfile="/tmp/config_test_simple-tmp_$(date -u +'%Y%m%dT%H%M%SZ').yaml"
clustername="configtest"

sed -E "s/^ name:.+/ name: $clustername/g" < "$configfileoriginal" > "$configfile" # replace cluster name in config file so we can use it in this script without running into override issues
cat "$configfile"
highlight "[START] ConfigTest $EXTRA_TITLE"

info "Creating cluster $clustername..."

cat <<EOF | $EXE cluster create "$clustername" --config=-
apiVersion: k3d.io/v1alpha4
kind: Simple
metadata:
  name: test
servers: 3
agents: 2
#image: rancher/k3s:latest
volumes:
  - volume: $HOME:/some/path
    nodeFilters:
      - all
env:
  - envVar: bar=baz,bob
    nodeFilters:
      - all
registries:
  create:
    name: registry.localhost
  use: []
  config: |
    mirrors:
      "my.company.registry":
        endpoint:
          - http://my.company.registry:5000
options:
  k3d:
    wait: true
    timeout: "360s" # should be pretty high for multi-server clusters to allow for a proper startup routine
    disableLoadbalancer: false
    disableImageVolume: false
  k3s:
    extraArgs:
      - arg: --tls-san=127.0.0.1
        nodeFilters:
          - server:*
    nodeLabels:
      - label: foo=bar
        nodeFilters:
          - server:0
          - loadbalancer
  kubeconfig:
    updateDefaultKubeconfig: true
    switchCurrentContext: true
  runtime:
    labels:
      - label: foo=bar
        nodeFilters:
          - server:0
          - loadbalancer
EOF

"$?" || failed "could not create cluster $clustername $EXTRA_TITLE"

info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
sleep 5

# 1. check initial access to the cluster
info "Checking that we have access to the cluster..."
check_clusters "$clustername" || failed "error checking cluster"

info "Checking that we have 5 nodes online..."
check_multi_node "$clustername" 5 || failed "failed to verify number of nodes"

# 2. check some config settings

## Environment Variables
info "Ensuring that environment variables are present in the node containers as set in the config (with comma)"
exec_in_node "k3d-$clustername-server-0" "env" | grep -q "bar=baz,bob" || failed "Expected env var 'bar=baz,bob' is not present in node k3d-$clustername-server-0"

## Container Labels
info "Ensuring that container labels have been set as stated in the config"
docker_assert_container_label "k3d-$clustername-server-0" "foo=bar" || failed "Expected label 'foo=bar' not present on container/node k3d-$clustername-server-0"

## K3s Node Labels
info "Ensuring that k3s node labels have been set as stated in the config"
k3s_assert_node_label "k3d-$clustername-server-0" "foo=bar" || failed "Expected label 'foo=bar' not present on node k3d-$clustername-server-0"

## Registry Node
registryname="registry.localhost"
info "Ensuring, that we have a registry node present"
$EXE node list "$registryname" || failed "Expected registry node $registryname to be present"

## merged registries.yaml
info "Ensuring, that the registries.yaml file contains both registries"
exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -qi "my.company.registry" || failed "Expected 'my.company.registry' to be in the /etc/rancher/k3s/registries.yaml"
exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -qi "$registryname" || failed "Expected '$registryname' to be in the /etc/rancher/k3s/registries.yaml"

# Cleanup

info "Deleting cluster $clustername (using config file)..."
$EXE cluster delete --config "$configfile" --trace || failed "could not delete the cluster $clustername"

rm "$configfile"

highlight "[DONE] ConfigTest $EXTRA_TITLE"

exit 0

