Finetuning
Signed-off-by: Kimmo Lehto <[email protected]>
kke committed Jun 20, 2023
1 parent e2a85c0 · commit a4f13c6
Showing 13 changed files with 133 additions and 153 deletions.
action/apply.go (53 changes: 17 additions & 36 deletions)
```diff
@@ -2,23 +2,18 @@ package action

 import (
     "fmt"
-    "os"
+    "io"
     "time"

     "github.com/k0sproject/k0sctl/analytics"
     "github.com/k0sproject/k0sctl/phase"
-    "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"

     log "github.com/sirupsen/logrus"
 )

 type Apply struct {
-    // Config is the k0sctl config
-    Config *v1beta1.Cluster
-    // Concurrency is the number of concurrent actions to run
-    Concurrency int
-    // ConcurrentUploads is the number of concurrent uploads to run
-    ConcurrentUploads int
+    // Manager is the phase manager
+    Manager *phase.Manager
     // DisableDowngradeCheck skips the downgrade check
     DisableDowngradeCheck bool
     // NoWait skips waiting for the cluster to be ready
@@ -27,27 +22,20 @@ type Apply struct {
     NoDrain bool
     // RestoreFrom is the path to a cluster backup archive to restore the state from
     RestoreFrom string
-    // KubeconfigOut is the path to write the kubeconfig to
-    KubeconfigOut string
+    // KubeconfigOut is a writer to write the kubeconfig to
+    KubeconfigOut io.Writer
     // KubeconfigAPIAddress is the API address to use in the kubeconfig
     KubeconfigAPIAddress string
-    // LogFile is the path where log will be found from
-    LogFile *os.File
 }

 func (a Apply) Run() error {
     start := time.Now()

-    if a.Config == nil {
-        return fmt.Errorf("config is nil")
-    }
-
     phase.NoWait = a.NoWait

-    manager := phase.Manager{Config: a.Config, Concurrency: a.Concurrency, ConcurrentUploads: a.ConcurrentUploads}
     lockPhase := &phase.Lock{}

-    manager.AddPhase(
+    a.Manager.AddPhase(
         &phase.Connect{},
         &phase.DetectOS{},
         lockPhase,
@@ -77,13 +65,11 @@ func (a Apply) Run() error {
         &phase.RunHooks{Stage: "after", Action: "apply"},
     )

-    var kubeCfgPhase *phase.GetKubeconfig
-    if a.KubeconfigOut != "" {
-        kubeCfgPhase = &phase.GetKubeconfig{APIAddress: a.KubeconfigAPIAddress}
-        manager.AddPhase(kubeCfgPhase)
+    if a.KubeconfigOut != nil {
+        a.Manager.AddPhase(&phase.GetKubeconfig{APIAddress: a.KubeconfigAPIAddress})
     }

-    manager.AddPhase(
+    a.Manager.AddPhase(
         &phase.Unlock{Cancel: lockPhase.Cancel},
         &phase.Disconnect{},
     )
@@ -92,20 +78,15 @@

     var result error

-    if result = manager.Run(); result != nil {
-        analytics.Client.Publish("apply-failure", map[string]interface{}{"clusterID": manager.Config.Spec.K0s.Metadata.ClusterID})
-        if lf := a.LogFile; lf != nil {
-            log.Errorf("apply failed - log file saved to %s", lf.Name())
-        }
+    if result = a.Manager.Run(); result != nil {
+        analytics.Client.Publish("apply-failure", map[string]interface{}{"clusterID": a.Manager.Config.Spec.K0s.Metadata.ClusterID})
         return result
     }

-    analytics.Client.Publish("apply-success", map[string]interface{}{"duration": time.Since(start), "clusterID": manager.Config.Spec.K0s.Metadata.ClusterID})
-    if a.KubeconfigOut != "" {
-        if err := os.WriteFile(a.KubeconfigOut, []byte(manager.Config.Metadata.Kubeconfig), 0644); err != nil {
+    analytics.Client.Publish("apply-success", map[string]interface{}{"duration": time.Since(start), "clusterID": a.Manager.Config.Spec.K0s.Metadata.ClusterID})
+    if a.KubeconfigOut != nil {
+        if _, err := a.KubeconfigOut.Write([]byte(a.Manager.Config.Metadata.Kubeconfig)); err != nil {
             log.Warnf("failed to write kubeconfig to %s: %v", a.KubeconfigOut, err)
-        } else {
-            log.Infof("kubeconfig written to %s", a.KubeconfigOut)
         }
     }

@@ -114,7 +95,7 @@ func (a Apply) Run() error {
     log.Infof(phase.Colorize.Green(text).String())

     uninstalled := false
-    for _, host := range manager.Config.Spec.Hosts {
+    for _, host := range a.Manager.Config.Spec.Hosts {
         if host.Reset {
             uninstalled = true
         }
@@ -123,9 +104,9 @@ func (a Apply) Run() error {
         log.Info("There were nodes that got uninstalled during the apply phase. Please remove them from your k0sctl config file")
     }

-    log.Infof("k0s cluster version %s is now installed", manager.Config.Spec.K0s.Version)
+    log.Infof("k0s cluster version %s is now installed", a.Manager.Config.Spec.K0s.Version)

-    if a.KubeconfigOut != "" {
+    if a.KubeconfigOut != nil {
         log.Infof("Tip: To access the cluster you can now fetch the admin kubeconfig using:")
         log.Infof("     " + phase.Colorize.Cyan("k0sctl kubeconfig").String())
     }
```
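Apply now borrows a caller-supplied `*phase.Manager` and streams the kubeconfig to any `io.Writer` instead of writing a file path itself. A minimal sketch of the new calling convention, assuming an already-loaded cluster config; the manager fields come from the constructor line removed above, while the helper name and concurrency values are illustrative:

```go
package example

import (
    "os"

    "github.com/k0sproject/k0sctl/action"
    "github.com/k0sproject/k0sctl/phase"
    "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
)

// applyCluster shows the new wiring: the caller owns the phase manager
// and may point KubeconfigOut at any io.Writer, not just a file.
func applyCluster(cfg *v1beta1.Cluster) error {
    manager := &phase.Manager{Config: cfg, Concurrency: 5, ConcurrentUploads: 5}
    apply := action.Apply{
        Manager:       manager,
        KubeconfigOut: os.Stdout, // was a file path string before this commit
    }
    return apply.Run()
}
```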
action/backup.go (20 changes: 6 additions & 14 deletions)
```diff
@@ -2,29 +2,24 @@ package action

 import (
     "fmt"
-    "os"
     "time"

     "github.com/k0sproject/k0sctl/analytics"
     "github.com/k0sproject/k0sctl/phase"
-    "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
     log "github.com/sirupsen/logrus"
 )

 type Backup struct {
-    Config            *v1beta1.Cluster
-    Concurrency       int
-    ConcurrentUploads int
-    LogFile           *os.File
+    // Manager is the phase manager
+    Manager *phase.Manager
 }

 func (b Backup) Run() error {
     start := time.Now()

-    manager := phase.Manager{Config: b.Config, Concurrency: b.Concurrency}
     lockPhase := &phase.Lock{}

-    manager.AddPhase(
+    b.Manager.AddPhase(
         &phase.Connect{},
         &phase.DetectOS{},
         lockPhase,
@@ -40,15 +35,12 @@ func (b Backup) Run() error {

     analytics.Client.Publish("backup-start", map[string]interface{}{})

-    if err := manager.Run(); err != nil {
-        analytics.Client.Publish("backup-failure", map[string]interface{}{"clusterID": manager.Config.Spec.K0s.Metadata.ClusterID})
-        if b.LogFile != nil {
-            log.Errorf("backup failed - log file saved to %s", b.LogFile.Name())
-        }
+    if err := b.Manager.Run(); err != nil {
+        analytics.Client.Publish("backup-failure", map[string]interface{}{"clusterID": b.Manager.Config.Spec.K0s.Metadata.ClusterID})
         return err
     }

-    analytics.Client.Publish("backup-success", map[string]interface{}{"duration": time.Since(start), "clusterID": manager.Config.Spec.K0s.Metadata.ClusterID})
+    analytics.Client.Publish("backup-success", map[string]interface{}{"duration": time.Since(start), "clusterID": b.Manager.Config.Spec.K0s.Metadata.ClusterID})

     duration := time.Since(start).Truncate(time.Second)
     text := fmt.Sprintf("==> Finished in %s", duration)
```
action/config_edit.go (9 changes: 4 additions & 5 deletions)
```diff
@@ -15,11 +15,10 @@ import (
 )

 type ConfigEdit struct {
-    Config      *v1beta1.Cluster
-    Concurrency int
-    Stdout      io.Writer
-    Stderr      io.Writer
-    Stdin       io.Reader
+    Config *v1beta1.Cluster
+    Stdout io.Writer
+    Stderr io.Writer
+    Stdin  io.Reader
 }

 func (c ConfigEdit) Run() error {
```
action/kubeconfig.go (20 changes: 5 additions & 15 deletions)
```diff
@@ -1,39 +1,29 @@
 package action

 import (
-    "fmt"
-
     "github.com/k0sproject/k0sctl/phase"
-    "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
     "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
 )

 type Kubeconfig struct {
-    Config               *v1beta1.Cluster
-    Concurrency          int
+    // Manager is the phase manager
+    Manager              *phase.Manager
     KubeconfigAPIAddress string

     Kubeconfig string
 }

 func (k *Kubeconfig) Run() error {
-    if k.Config == nil {
-        return fmt.Errorf("config is nil")
-    }
-
-    c := k.Config
-
     // Change so that the internal config has only single controller host as we
     // do not need to connect to all nodes
-    c.Spec.Hosts = cluster.Hosts{c.Spec.K0sLeader()}
-    manager := phase.Manager{Config: k.Config, Concurrency: k.Concurrency}
+    k.Manager.Config.Spec.Hosts = cluster.Hosts{k.Manager.Config.Spec.K0sLeader()}

-    manager.AddPhase(
+    k.Manager.AddPhase(
         &phase.Connect{},
         &phase.DetectOS{},
         &phase.GetKubeconfig{APIAddress: k.KubeconfigAPIAddress},
         &phase.Disconnect{},
     )

-    return manager.Run()
+    return k.Manager.Run()
 }
```
action/reset.go (26 changes: 11 additions & 15 deletions)
```diff
@@ -2,31 +2,28 @@ package action

 import (
     "fmt"
+    "io"
     "os"
     "time"

     "github.com/k0sproject/k0sctl/analytics"
     "github.com/k0sproject/k0sctl/phase"
-    "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
     log "github.com/sirupsen/logrus"

     "github.com/AlecAivazis/survey/v2"
     "github.com/mattn/go-isatty"
 )

 type Reset struct {
-    Config      *v1beta1.Cluster
-    Concurrency int
-    Force       bool
+    // Manager is the phase manager
+    Manager *phase.Manager
+    Stdout  io.Writer
+    Force   bool
 }

 func (r Reset) Run() error {
-    if r.Config == nil {
-        return fmt.Errorf("config is nil")
-    }
-
     if !r.Force {
-        if !isatty.IsTerminal(os.Stdout.Fd()) {
+        if stdoutFile, ok := r.Stdout.(*os.File); ok && !isatty.IsTerminal(stdoutFile.Fd()) {
             return fmt.Errorf("reset requires --force")
         }
         confirmed := false
@@ -41,13 +38,12 @@

     start := time.Now()

-    manager := phase.Manager{Config: r.Config, Concurrency: r.Concurrency}
-    for _, h := range r.Config.Spec.Hosts {
+    for _, h := range r.Manager.Config.Spec.Hosts {
         h.Reset = true
     }

     lockPhase := &phase.Lock{}
-    manager.AddPhase(
+    r.Manager.AddPhase(
         &phase.Connect{},
         &phase.DetectOS{},
         lockPhase,
@@ -71,12 +67,12 @@

     analytics.Client.Publish("reset-start", map[string]interface{}{})

-    if err := manager.Run(); err != nil {
-        analytics.Client.Publish("reset-failure", map[string]interface{}{"clusterID": r.Config.Spec.K0s.Metadata.ClusterID})
+    if err := r.Manager.Run(); err != nil {
+        analytics.Client.Publish("reset-failure", map[string]interface{}{"clusterID": r.Manager.Config.Spec.K0s.Metadata.ClusterID})
         return err
     }

-    analytics.Client.Publish("reset-success", map[string]interface{}{"duration": time.Since(start), "clusterID": r.Config.Spec.K0s.Metadata.ClusterID})
+    analytics.Client.Publish("reset-success", map[string]interface{}{"duration": time.Since(start), "clusterID": r.Manager.Config.Spec.K0s.Metadata.ClusterID})

     duration := time.Since(start).Truncate(time.Second)
     text := fmt.Sprintf("==> Finished in %s", duration)
```
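Reset now receives its output stream as an `io.Writer`, which exposes no file descriptor, so the TTY check first type-asserts down to `*os.File`. A standalone sketch of that pattern (the helper name is illustrative, not part of this commit):

```go
package example

import (
    "io"
    "os"

    "github.com/mattn/go-isatty"
)

// writerIsTerminal reports whether w is an *os.File attached to a TTY.
// Writers that are not *os.File at all (buffers, custom pipes) report
// false, since there is no file descriptor to inspect.
func writerIsTerminal(w io.Writer) bool {
    f, ok := w.(*os.File)
    return ok && isatty.IsTerminal(f.Fd())
}
```

Note that in the hunk above the `--force` guard only triggers for a non-TTY `*os.File`; a `Stdout` that is not an `*os.File` skips the guard entirely and falls through to the interactive confirmation prompt.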
cmd/apply.go (39 changes: 21 additions & 18 deletions)
```diff
@@ -1,10 +1,12 @@
 package cmd

 import (
+    "fmt"
+    "io"
     "os"

     "github.com/k0sproject/k0sctl/action"
-    "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
+    "github.com/k0sproject/k0sctl/phase"

     "github.com/urfave/cli/v2"
 )
@@ -49,33 +51,34 @@
         analyticsFlag,
         upgradeCheckFlag,
     },
-    Before: actions(initLogging, startCheckUpgrade, initConfig, displayLogo, initAnalytics, displayCopyright, warnOldCache),
+    Before: actions(initLogging, startCheckUpgrade, initConfig, initManager, displayLogo, initAnalytics, displayCopyright, warnOldCache),
     After:  actions(reportCheckUpgrade, closeAnalytics),
     Action: func(ctx *cli.Context) error {
-        logWriter, err := LogFile()
-        if err != nil {
-            return err
-        }
-
-        var lf *os.File
+        var kubeconfigOut io.Writer

-        if l, ok := logWriter.(*os.File); ok && l != nil {
-            lf = l
+        if kc := ctx.String("kubeconfig-out"); kc != "" {
+            out, err := os.OpenFile(kc, os.O_CREATE|os.O_WRONLY, 0600)
+            if err != nil {
+                return fmt.Errorf("failed to open kubeconfig-out file: %w", err)
+            }
+            defer out.Close()
+            kubeconfigOut = out
         }

-        applier := action.Apply{
+        applyAction := action.Apply{
+            Manager:               ctx.Context.Value(ctxManagerKey{}).(*phase.Manager),
+            KubeconfigOut:         kubeconfigOut,
+            KubeconfigAPIAddress:  ctx.String("kubeconfig-api-address"),
             NoWait:                ctx.Bool("no-wait"),
             NoDrain:               ctx.Bool("no-drain"),
-            Config:                ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster),
-            Concurrency:           ctx.Int("concurrency"),
-            ConcurrentUploads:     ctx.Int("concurrent-uploads"),
             DisableDowngradeCheck: ctx.Bool("disable-downgrade-check"),
-            KubeconfigOut:         ctx.String("kubeconfig-out"),
-            KubeconfigAPIAddress:  ctx.String("kubeconfig-api-address"),
             RestoreFrom:           ctx.String("restore-from"),
-            LogFile:               lf,
         }

-        return applier.Run()
+        if err := applyAction.Run(); err != nil {
+            return fmt.Errorf("apply failed - log file saved to %s: %w", ctx.Context.Value(ctxLogFileKey{}).(string), err)
+        }
+
+        return nil
     },
 }
```
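The `Before` chain gains `initManager`, and the `Action` retrieves a ready `*phase.Manager` from the context through `ctxManagerKey{}`; the hook's definition is in one of this commit's files not shown on this page. A hypothetical sketch of such a hook, following the `ctxConfigKey{}` pattern visible in the removed lines (everything beyond the key types, flag names, and the manager fields is an assumption):

```go
// Hypothetical sketch only; the real initManager ships elsewhere in this
// commit. It promotes the config parsed by initConfig into a *phase.Manager
// and stores it for the Action to fetch via ctxManagerKey{}.
func initManager(ctx *cli.Context) error {
    cfg, ok := ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster)
    if !ok || cfg == nil {
        return fmt.Errorf("cluster config not found in context")
    }
    manager := &phase.Manager{
        Config:            cfg,
        Concurrency:       ctx.Int("concurrency"),
        ConcurrentUploads: ctx.Int("concurrent-uploads"),
    }
    ctx.Context = context.WithValue(ctx.Context, ctxManagerKey{}, manager)
    return nil
}
```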