From c28e1ff58ab0e7329ebdfcab538df892ee4465d9 Mon Sep 17 00:00:00 2001 From: Kimmo Lehto Date: Wed, 14 Jun 2023 13:47:56 +0300 Subject: [PATCH 1/6] Extract logic from cmd/apply into action.Apply Signed-off-by: Kimmo Lehto --- action/apply.go | 137 ++++++++++++++++++++++++++++++++++++++++++++++++ cmd/apply.go | 114 +++++++--------------------------------- 2 files changed, 157 insertions(+), 94 deletions(-) create mode 100644 action/apply.go diff --git a/action/apply.go b/action/apply.go new file mode 100644 index 00000000..cf697fa4 --- /dev/null +++ b/action/apply.go @@ -0,0 +1,137 @@ +package action + +import ( + "fmt" + "os" + "time" + + "github.com/k0sproject/k0sctl/analytics" + "github.com/k0sproject/k0sctl/phase" + "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" + + log "github.com/sirupsen/logrus" +) + +type Apply struct { + // Config is the k0sctl config + Config *v1beta1.Cluster + // Concurrency is the number of concurrent actions to run + Concurrency int + // ConcurrentUploads is the number of concurrent uploads to run + ConcurrentUploads int + // DisableDowngradeCheck skips the downgrade check + DisableDowngradeCheck bool + // Force allows forced installation in case of certain failures + Force bool + // NoWait skips waiting for the cluster to be ready + NoWait bool + // NoDrain skips draining worker nodes + NoDrain bool + // RestoreFrom is the path to a cluster backup archive to restore the state from + RestoreFrom string + // KubeconfigOut is the path to write the kubeconfig to + KubeconfigOut string + // KubeconfigAPIAddress is the API address to use in the kubeconfig + KubeconfigAPIAddress string + // LogFile is the path where log will be found from + LogFile *os.File +} + +func (a Apply) Run() error { + start := time.Now() + + if a.Config == nil { + return fmt.Errorf("config is nil") + } + + phase.NoWait = a.NoWait + phase.Force = a.Force + + manager := phase.Manager{Config: a.Config, Concurrency: a.Concurrency, ConcurrentUploads: 
a.ConcurrentUploads} + lockPhase := &phase.Lock{} + + manager.AddPhase( + &phase.Connect{}, + &phase.DetectOS{}, + lockPhase, + &phase.PrepareHosts{}, + &phase.GatherFacts{}, + &phase.DownloadBinaries{}, + &phase.UploadFiles{}, + &phase.ValidateHosts{}, + &phase.GatherK0sFacts{}, + &phase.ValidateFacts{SkipDowngradeCheck: a.DisableDowngradeCheck}, + &phase.UploadBinaries{}, + &phase.DownloadK0s{}, + &phase.InstallBinaries{}, + &phase.RunHooks{Stage: "before", Action: "apply"}, + &phase.PrepareArm{}, + &phase.ConfigureK0s{}, + &phase.Restore{ + RestoreFrom: a.RestoreFrom, + }, + &phase.InitializeK0s{}, + &phase.InstallControllers{}, + &phase.InstallWorkers{}, + &phase.UpgradeControllers{}, + &phase.UpgradeWorkers{NoDrain: a.NoDrain}, + &phase.ResetWorkers{NoDrain: a.NoDrain}, + &phase.ResetControllers{NoDrain: a.NoDrain}, + &phase.RunHooks{Stage: "after", Action: "apply"}, + ) + + var kubeCfgPhase *phase.GetKubeconfig + if a.KubeconfigOut != "" { + kubeCfgPhase = &phase.GetKubeconfig{APIAddress: a.KubeconfigAPIAddress} + manager.AddPhase(kubeCfgPhase) + } + + manager.AddPhase( + &phase.Unlock{Cancel: lockPhase.Cancel}, + &phase.Disconnect{}, + ) + + analytics.Client.Publish("apply-start", map[string]interface{}{}) + + var result error + + if result = manager.Run(); result != nil { + analytics.Client.Publish("apply-failure", map[string]interface{}{"clusterID": manager.Config.Spec.K0s.Metadata.ClusterID}) + if lf := a.LogFile; lf != nil { + log.Errorf("apply failed - log file saved to %s", lf.Name()) + } + return result + } + + analytics.Client.Publish("apply-success", map[string]interface{}{"duration": time.Since(start), "clusterID": manager.Config.Spec.K0s.Metadata.ClusterID}) + if a.KubeconfigOut != "" { + if err := os.WriteFile(a.KubeconfigOut, []byte(manager.Config.Metadata.Kubeconfig), 0644); err != nil { + log.Warnf("failed to write kubeconfig to %s: %v", a.KubeconfigOut, err) + } else { + log.Infof("kubeconfig written to %s", a.KubeconfigOut) + } + } + + 
duration := time.Since(start).Truncate(time.Second) + text := fmt.Sprintf("==> Finished in %s", duration) + log.Infof(phase.Colorize.Green(text).String()) + + uninstalled := false + for _, host := range manager.Config.Spec.Hosts { + if host.Reset { + uninstalled = true + } + } + if uninstalled { + log.Info("There were nodes that got uninstalled during the apply phase. Please remove them from your k0sctl config file") + } + + log.Infof("k0s cluster version %s is now installed", manager.Config.Spec.K0s.Version) + + if a.KubeconfigOut != "" { + log.Infof("Tip: To access the cluster you can now fetch the admin kubeconfig using:") + log.Infof(" " + phase.Colorize.Cyan("k0sctl kubeconfig").String()) + } + + return nil +} diff --git a/cmd/apply.go b/cmd/apply.go index bb630457..b446f077 100644 --- a/cmd/apply.go +++ b/cmd/apply.go @@ -1,14 +1,10 @@ package cmd import ( - "fmt" "os" - "time" - "github.com/k0sproject/k0sctl/analytics" - "github.com/k0sproject/k0sctl/phase" + "github.com/k0sproject/k0sctl/action" "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" - log "github.com/sirupsen/logrus" "github.com/urfave/cli/v2" ) @@ -62,102 +58,32 @@ var applyCommand = &cli.Command{ Before: actions(initLogging, startCheckUpgrade, initConfig, displayLogo, initAnalytics, displayCopyright, warnOldCache), After: actions(reportCheckUpgrade, closeAnalytics), Action: func(ctx *cli.Context) error { - start := time.Now() - phase.NoWait = ctx.Bool("no-wait") - phase.Force = ctx.Bool("force") - manager := phase.Manager{Config: ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster), Concurrency: ctx.Int("concurrency"), ConcurrentUploads: ctx.Int("concurrent-uploads")} - lockPhase := &phase.Lock{} - - manager.AddPhase( - &phase.Connect{}, - &phase.DetectOS{}, - lockPhase, - &phase.PrepareHosts{}, - &phase.GatherFacts{}, - &phase.DownloadBinaries{}, - &phase.UploadFiles{}, - &phase.ValidateHosts{}, - &phase.GatherK0sFacts{}, - &phase.ValidateFacts{SkipDowngradeCheck: 
ctx.Bool("disable-downgrade-check")}, - &phase.UploadBinaries{}, - &phase.DownloadK0s{}, - &phase.InstallBinaries{}, - &phase.RunHooks{Stage: "before", Action: "apply"}, - &phase.PrepareArm{}, - &phase.ConfigureK0s{}, - &phase.Restore{ - RestoreFrom: ctx.String("restore-from"), - }, - &phase.InitializeK0s{}, - &phase.InstallControllers{}, - &phase.InstallWorkers{}, - &phase.UpgradeControllers{}, - &phase.UpgradeWorkers{ - NoDrain: ctx.Bool("no-drain"), - }, - &phase.ResetWorkers{ - NoDrain: ctx.Bool("no-drain"), - }, - &phase.ResetControllers{ - NoDrain: ctx.Bool("no-drain"), - }, - &phase.RunHooks{Stage: "after", Action: "apply"}, - ) - - kubecfgOut := ctx.String("kubeconfig-out") - var kubeCfgPhase *phase.GetKubeconfig - if kubecfgOut != "" { - kubeCfgPhase = &phase.GetKubeconfig{APIAddress: ctx.String("kubeconfig-api-address")} - manager.AddPhase(kubeCfgPhase) + logWriter, err := LogFile() + if err != nil { + return err } - manager.AddPhase( - &phase.Unlock{Cancel: lockPhase.Cancel}, - &phase.Disconnect{}, - ) - - analytics.Client.Publish("apply-start", map[string]interface{}{}) + var lf *os.File - var result error - - if result = manager.Run(); result != nil { - analytics.Client.Publish("apply-failure", map[string]interface{}{"clusterID": manager.Config.Spec.K0s.Metadata.ClusterID}) - if lf, err := LogFile(); err == nil { - if ln, ok := lf.(interface{ Name() string }); ok { - log.Errorf("apply failed - log file saved to %s", ln.Name()) - } - } - return result + if l, ok := logWriter.(*os.File); ok && l != nil { + lf = l } - analytics.Client.Publish("apply-success", map[string]interface{}{"duration": time.Since(start), "clusterID": manager.Config.Spec.K0s.Metadata.ClusterID}) - if kubecfgOut != "" { - if err := os.WriteFile(kubecfgOut, []byte(manager.Config.Metadata.Kubeconfig), 0644); err != nil { - log.Warnf("failed to write kubeconfig to %s: %v", kubecfgOut, err) - } else { - log.Infof("kubeconfig written to %s", kubecfgOut) - } + applier := action.Apply{ + 
Force: ctx.Bool("force"), + NoWait: ctx.Bool("no-wait"), + NoDrain: ctx.Bool("no-drain"), + Config: ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster), + Concurrency: ctx.Int("concurrency"), + ConcurrentUploads: ctx.Int("concurrent-uploads"), + DisableDowngradeCheck: ctx.Bool("disable-downgrade-check"), + KubeconfigOut: ctx.String("kubeconfig-out"), + KubeconfigAPIAddress: ctx.String("kubeconfig-api-address"), + RestoreFrom: ctx.String("restore-from"), + LogFile: lf, } - duration := time.Since(start).Truncate(time.Second) - text := fmt.Sprintf("==> Finished in %s", duration) - log.Infof(Colorize.Green(text).String()) - - uninstalled := false - for _, host := range manager.Config.Spec.Hosts { - if host.Reset { - uninstalled = true - } - } - if uninstalled { - log.Info("There were nodes that got uninstalled during the apply phase. Please remove them from your k0sctl config file") - } - - log.Infof("k0s cluster version %s is now installed", manager.Config.Spec.K0s.Version) - log.Infof("Tip: To access the cluster you can now fetch the admin kubeconfig using:") - log.Infof(" " + Colorize.Cyan("k0sctl kubeconfig").String()) - - return nil + return applier.Run() }, } From 75341b80d497477b78c32e772a0e1f8c65fa4b08 Mon Sep 17 00:00:00 2001 From: Kimmo Lehto Date: Wed, 14 Jun 2023 15:14:34 +0300 Subject: [PATCH 2/6] Extract logic from cmd/kubeconfig into action.Kubeconfig Signed-off-by: Kimmo Lehto --- action/kubeconfig.go | 39 +++++++++++++++++++++++++++++++++++++++ cmd/kubeconfig.go | 35 ++++++++++++++++------------------- 2 files changed, 55 insertions(+), 19 deletions(-) create mode 100644 action/kubeconfig.go diff --git a/action/kubeconfig.go b/action/kubeconfig.go new file mode 100644 index 00000000..5a378e4b --- /dev/null +++ b/action/kubeconfig.go @@ -0,0 +1,39 @@ +package action + +import ( + "fmt" + + "github.com/k0sproject/k0sctl/phase" + "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" + 
"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster" +) + +type Kubeconfig struct { + Config *v1beta1.Cluster + Concurrency int + KubeconfigAPIAddress string + + Kubeconfig string +} + +func (k *Kubeconfig) Run() error { + if k.Config == nil { + return fmt.Errorf("config is nil") + } + + c := k.Config + + // Change so that the internal config has only single controller host as we + // do not need to connect to all nodes + c.Spec.Hosts = cluster.Hosts{c.Spec.K0sLeader()} + manager := phase.Manager{Config: k.Config, Concurrency: k.Concurrency} + + manager.AddPhase( + &phase.Connect{}, + &phase.DetectOS{}, + &phase.GetKubeconfig{APIAddress: k.KubeconfigAPIAddress}, + &phase.Disconnect{}, + ) + + return manager.Run() +} diff --git a/cmd/kubeconfig.go b/cmd/kubeconfig.go index 23ff7e5c..f05f0f20 100644 --- a/cmd/kubeconfig.go +++ b/cmd/kubeconfig.go @@ -3,10 +3,9 @@ package cmd import ( "fmt" + "github.com/k0sproject/k0sctl/action" "github.com/k0sproject/k0sctl/analytics" - "github.com/k0sproject/k0sctl/phase" "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" - "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster" "github.com/urfave/cli/v2" ) @@ -29,31 +28,29 @@ var kubeconfigCommand = &cli.Command{ analyticsFlag, }, Before: actions(initSilentLogging, initConfig, initAnalytics), - After: func(ctx *cli.Context) error { + After: func(_ *cli.Context) error { analytics.Client.Close() return nil }, Action: func(ctx *cli.Context) error { - c := ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster) - - // Change so that the internal config has only single controller host as we - // do not need to connect to all nodes - c.Spec.Hosts = cluster.Hosts{c.Spec.K0sLeader()} - manager := phase.Manager{Config: c, Concurrency: ctx.Int("concurrency")} + var cfg *v1beta1.Cluster + if c, ok := ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster); ok { + cfg = c + } else { + return fmt.Errorf("config is nil") + } - 
manager.AddPhase( - &phase.Connect{}, - &phase.DetectOS{}, - &phase.GetKubeconfig{APIAddress: ctx.String("address")}, - &phase.Disconnect{}, - ) + kubeconfigAction := action.Kubeconfig{ + Config: cfg, + Concurrency: ctx.Int("concurrency"), + KubeconfigAPIAddress: ctx.String("address"), + } - if err := manager.Run(); err != nil { + if err := kubeconfigAction.Run(); err != nil { return err } - fmt.Fprintf(ctx.App.Writer, "%s\n", c.Metadata.Kubeconfig) - - return nil + _, err := fmt.Fprintf(ctx.App.Writer, "%s\n", cfg.Metadata.Kubeconfig) + return err }, } From 45597b278266ad594041924cca8c2f6d9a5247dc Mon Sep 17 00:00:00 2001 From: Kimmo Lehto Date: Wed, 14 Jun 2023 15:21:41 +0300 Subject: [PATCH 3/6] Extract logic from cmd/reset into action.Reset Signed-off-by: Kimmo Lehto --- action/reset.go | 86 +++++++++++++++++++++++++++++++++++++++++++++++++ cmd/reset.go | 71 ++++------------------------------------ 2 files changed, 92 insertions(+), 65 deletions(-) create mode 100644 action/reset.go diff --git a/action/reset.go b/action/reset.go new file mode 100644 index 00000000..8bc0909c --- /dev/null +++ b/action/reset.go @@ -0,0 +1,86 @@ +package action + +import ( + "fmt" + "os" + "time" + + "github.com/k0sproject/k0sctl/analytics" + "github.com/k0sproject/k0sctl/phase" + "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" + log "github.com/sirupsen/logrus" + + "github.com/AlecAivazis/survey/v2" + "github.com/mattn/go-isatty" +) + +type Reset struct { + Config *v1beta1.Cluster + Concurrency int + Force bool +} + +func (r Reset) Run() error { + if r.Config == nil { + return fmt.Errorf("config is nil") + } + + if !r.Force { + if !isatty.IsTerminal(os.Stdout.Fd()) { + return fmt.Errorf("reset requires --force") + } + confirmed := false + prompt := &survey.Confirm{ + Message: "Going to reset all of the hosts, which will destroy all configuration and data, Are you sure?", + } + _ = survey.AskOne(prompt, &confirmed) + if !confirmed { + return 
fmt.Errorf("confirmation or --force required to proceed") + } + } + + start := time.Now() + + manager := phase.Manager{Config: r.Config, Concurrency: r.Concurrency} + for _, h := range r.Config.Spec.Hosts { + h.Reset = true + } + + lockPhase := &phase.Lock{} + manager.AddPhase( + &phase.Connect{}, + &phase.DetectOS{}, + lockPhase, + &phase.PrepareHosts{}, + &phase.GatherK0sFacts{}, + &phase.RunHooks{Stage: "before", Action: "reset"}, + &phase.ResetWorkers{ + NoDrain: true, + NoDelete: true, + }, + &phase.ResetControllers{ + NoDrain: true, + NoDelete: true, + NoLeave: true, + }, + &phase.ResetLeader{}, + &phase.RunHooks{Stage: "after", Action: "reset"}, + &phase.Unlock{Cancel: lockPhase.Cancel}, + &phase.Disconnect{}, + ) + + analytics.Client.Publish("reset-start", map[string]interface{}{}) + + if err := manager.Run(); err != nil { + analytics.Client.Publish("reset-failure", map[string]interface{}{"clusterID": r.Config.Spec.K0s.Metadata.ClusterID}) + return err + } + + analytics.Client.Publish("reset-success", map[string]interface{}{"duration": time.Since(start), "clusterID": r.Config.Spec.K0s.Metadata.ClusterID}) + + duration := time.Since(start).Truncate(time.Second) + text := fmt.Sprintf("==> Finished in %s", duration) + log.Infof(phase.Colorize.Green(text).String()) + + return nil +} diff --git a/cmd/reset.go b/cmd/reset.go index 0e46aeeb..24d19ec8 100644 --- a/cmd/reset.go +++ b/cmd/reset.go @@ -1,17 +1,9 @@ package cmd import ( - "fmt" - "os" - "time" - - "github.com/k0sproject/k0sctl/analytics" - "github.com/k0sproject/k0sctl/phase" + "github.com/k0sproject/k0sctl/action" "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" - log "github.com/sirupsen/logrus" - "github.com/AlecAivazis/survey/v2" - "github.com/mattn/go-isatty" "github.com/urfave/cli/v2" ) @@ -37,63 +29,12 @@ var resetCommand = &cli.Command{ Before: actions(initLogging, startCheckUpgrade, initConfig, initAnalytics, displayCopyright), After: actions(reportCheckUpgrade, 
closeAnalytics), Action: func(ctx *cli.Context) error { - if !ctx.Bool("force") { - if !isatty.IsTerminal(os.Stdout.Fd()) { - return fmt.Errorf("reset requires --force") - } - confirmed := false - prompt := &survey.Confirm{ - Message: "Going to reset all of the hosts, which will destroy all configuration and data, Are you sure?", - } - _ = survey.AskOne(prompt, &confirmed) - if !confirmed { - return fmt.Errorf("confirmation or --force required to proceed") - } - } - - start := time.Now() - - manager := phase.Manager{Config: ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster), Concurrency: ctx.Int("concurrency")} - for _, h := range manager.Config.Spec.Hosts { - h.Reset = true - } - - lockPhase := &phase.Lock{} - manager.AddPhase( - &phase.Connect{}, - &phase.DetectOS{}, - lockPhase, - &phase.PrepareHosts{}, - &phase.GatherK0sFacts{}, - &phase.RunHooks{Stage: "before", Action: "reset"}, - &phase.ResetWorkers{ - NoDrain: true, - NoDelete: true, - }, - &phase.ResetControllers{ - NoDrain: true, - NoDelete: true, - NoLeave: true, - }, - &phase.ResetLeader{}, - &phase.RunHooks{Stage: "after", Action: "reset"}, - &phase.Unlock{Cancel: lockPhase.Cancel}, - &phase.Disconnect{}, - ) - - analytics.Client.Publish("reset-start", map[string]interface{}{}) - - if err := manager.Run(); err != nil { - analytics.Client.Publish("reset-failure", map[string]interface{}{"clusterID": manager.Config.Spec.K0s.Metadata.ClusterID}) - return err + resetAction := action.Reset{ + Config: ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster), + Concurrency: ctx.Int("concurrency"), + Force: ctx.Bool("force"), } - analytics.Client.Publish("reset-success", map[string]interface{}{"duration": time.Since(start), "clusterID": manager.Config.Spec.K0s.Metadata.ClusterID}) - - duration := time.Since(start).Truncate(time.Second) - text := fmt.Sprintf("==> Finished in %s", duration) - log.Infof(Colorize.Green(text).String()) - - return nil + return resetAction.Run() }, } From 
d383d88270976ddd9ad2150a75c32243456ef888 Mon Sep 17 00:00:00 2001 From: Kimmo Lehto Date: Thu, 15 Jun 2023 13:42:02 +0300 Subject: [PATCH 4/6] Extract logic from cmd/backup into action.Backup Signed-off-by: Kimmo Lehto --- action/backup.go | 57 ++++++++++++++++++++++++++++++++++++++++++++++++ cmd/backup.go | 52 +++++++++++++------------------------------ 2 files changed, 72 insertions(+), 37 deletions(-) create mode 100644 action/backup.go diff --git a/action/backup.go b/action/backup.go new file mode 100644 index 00000000..e4656d61 --- /dev/null +++ b/action/backup.go @@ -0,0 +1,57 @@ +package action + +import ( + "fmt" + "os" + "time" + + "github.com/k0sproject/k0sctl/analytics" + "github.com/k0sproject/k0sctl/phase" + "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" + log "github.com/sirupsen/logrus" +) + +type Backup struct { + Config *v1beta1.Cluster + Concurrency int + ConcurrentUploads int + LogFile *os.File +} + +func (b Backup) Run() error { + start := time.Now() + + manager := phase.Manager{Config: b.Config, Concurrency: b.Concurrency} + lockPhase := &phase.Lock{} + + manager.AddPhase( + &phase.Connect{}, + &phase.DetectOS{}, + lockPhase, + &phase.PrepareHosts{}, + &phase.GatherFacts{}, + &phase.GatherK0sFacts{}, + &phase.RunHooks{Stage: "before", Action: "backup"}, + &phase.Backup{}, + &phase.RunHooks{Stage: "after", Action: "backup"}, + &phase.Unlock{Cancel: lockPhase.Cancel}, + &phase.Disconnect{}, + ) + + analytics.Client.Publish("backup-start", map[string]interface{}{}) + + if err := manager.Run(); err != nil { + analytics.Client.Publish("backup-failure", map[string]interface{}{"clusterID": manager.Config.Spec.K0s.Metadata.ClusterID}) + if b.LogFile != nil { + log.Errorf("backup failed - log file saved to %s", b.LogFile.Name()) + } + return err + } + + analytics.Client.Publish("backup-success", map[string]interface{}{"duration": time.Since(start), "clusterID": manager.Config.Spec.K0s.Metadata.ClusterID}) + + duration := 
time.Since(start).Truncate(time.Second) + text := fmt.Sprintf("==> Finished in %s", duration) + log.Infof(phase.Colorize.Green(text).String()) + return nil +} diff --git a/cmd/backup.go b/cmd/backup.go index 155ae3e4..0698fede 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -1,13 +1,10 @@ package cmd import ( - "fmt" - "time" + "os" - "github.com/k0sproject/k0sctl/analytics" - "github.com/k0sproject/k0sctl/phase" + "github.com/k0sproject/k0sctl/action" "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" - log "github.com/sirupsen/logrus" "github.com/urfave/cli/v2" ) @@ -28,42 +25,23 @@ var backupCommand = &cli.Command{ Before: actions(initLogging, startCheckUpgrade, initConfig, displayLogo, initAnalytics, displayCopyright), After: actions(reportCheckUpgrade, closeAnalytics), Action: func(ctx *cli.Context) error { - start := time.Now() - - manager := phase.Manager{Config: ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster), Concurrency: ctx.Int("concurrency")} - lockPhase := &phase.Lock{} - - manager.AddPhase( - &phase.Connect{}, - &phase.DetectOS{}, - lockPhase, - &phase.PrepareHosts{}, - &phase.GatherFacts{}, - &phase.GatherK0sFacts{}, - &phase.RunHooks{Stage: "before", Action: "backup"}, - &phase.Backup{}, - &phase.RunHooks{Stage: "after", Action: "backup"}, - &phase.Unlock{Cancel: lockPhase.Cancel}, - &phase.Disconnect{}, - ) + logWriter, err := LogFile() + if err != nil { + return err + } - analytics.Client.Publish("backup-start", map[string]interface{}{}) + var lf *os.File - if err := manager.Run(); err != nil { - analytics.Client.Publish("backup-failure", map[string]interface{}{"clusterID": manager.Config.Spec.K0s.Metadata.ClusterID}) - if lf, err := LogFile(); err == nil { - if ln, ok := lf.(interface{ Name() string }); ok { - log.Errorf("backup failed - log file saved to %s", ln.Name()) - } - } - return err + if l, ok := logWriter.(*os.File); ok && l != nil { + lf = l } - analytics.Client.Publish("backup-success", 
map[string]interface{}{"duration": time.Since(start), "clusterID": manager.Config.Spec.K0s.Metadata.ClusterID}) + backupAction := action.Backup{ + Config: ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster), + Concurrency: ctx.Int("concurrency"), + LogFile: lf, + } - duration := time.Since(start).Truncate(time.Second) - text := fmt.Sprintf("==> Finished in %s", duration) - log.Infof(Colorize.Green(text).String()) - return nil + return backupAction.Run() }, } From 9d9723d9e2aeaaaf880b0485d807922e292ff8c6 Mon Sep 17 00:00:00 2001 From: Kimmo Lehto Date: Thu, 15 Jun 2023 14:35:25 +0300 Subject: [PATCH 5/6] Extract logic from cmd/config_edit into action.ConfigEdit Signed-off-by: Kimmo Lehto --- action/config_edit.go | 106 ++++++++++++++++++++++++++++++++++++++++++ cmd/config_edit.go | 93 ++++-------------------------------- 2 files changed, 114 insertions(+), 85 deletions(-) create mode 100644 action/config_edit.go diff --git a/action/config_edit.go b/action/config_edit.go new file mode 100644 index 00000000..2fa3ceb1 --- /dev/null +++ b/action/config_edit.go @@ -0,0 +1,106 @@ +package action + +import ( + "fmt" + "io" + "os" + + "github.com/k0sproject/k0sctl/analytics" + "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" + "github.com/k0sproject/rig/exec" + + osexec "os/exec" + + "github.com/mattn/go-isatty" +) + +type ConfigEdit struct { + Config *v1beta1.Cluster + Concurrency int + Stdout io.Writer + Stderr io.Writer + Stdin io.Reader +} + +func (c ConfigEdit) Run() error { + stdoutFile, ok := c.Stdout.(*os.File) + + if !ok || !isatty.IsTerminal(stdoutFile.Fd()) { + return fmt.Errorf("output is not a terminal") + } + + analytics.Client.Publish("config-edit-start", map[string]interface{}{}) + + editor, err := shellEditor() + if err != nil { + return err + } + + h := c.Config.Spec.K0sLeader() + + if err := h.Connect(); err != nil { + return fmt.Errorf("failed to connect: %w", err) + } + defer h.Disconnect() + + if err := h.ResolveConfigurer(); err 
!= nil { + return err + } + + oldCfg, err := h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "-n kube-system get clusterconfig k0s -o yaml"), exec.Sudo(h)) + if err != nil { + return fmt.Errorf("%s: %w", h, err) + } + + tmpFile, err := os.CreateTemp("", "k0s-config.*.yaml") + if err != nil { + return err + } + defer func() { _ = os.Remove(tmpFile.Name()) }() + + if _, err := tmpFile.WriteString(oldCfg); err != nil { + return err + } + + if err := tmpFile.Close(); err != nil { + return err + } + + cmd := osexec.Command(editor, tmpFile.Name()) + cmd.Stdin = c.Stdin + cmd.Stdout = c.Stdout + cmd.Stderr = c.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to start editor (%s): %w", cmd.String(), err) + } + + newCfgBytes, err := os.ReadFile(tmpFile.Name()) + if err != nil { + return err + } + newCfg := string(newCfgBytes) + + if newCfg == oldCfg { + return fmt.Errorf("configuration was not changed, aborting") + } + + if err := h.Exec(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "apply -n kube-system -f -"), exec.Stdin(newCfg), exec.Sudo(h)); err != nil { + return err + } + + return nil +} + +func shellEditor() (string, error) { + if v := os.Getenv("VISUAL"); v != "" { + return v, nil + } + if v := os.Getenv("EDITOR"); v != "" { + return v, nil + } + if path, err := osexec.LookPath("vi"); err == nil { + return path, nil + } + + return "", fmt.Errorf("could not detect shell editor ($VISUAL, $EDITOR)") +} diff --git a/cmd/config_edit.go b/cmd/config_edit.go index f1fcb3e5..c2d1b3d1 100644 --- a/cmd/config_edit.go +++ b/cmd/config_edit.go @@ -1,33 +1,12 @@ package cmd import ( - "fmt" - "os" - - "github.com/k0sproject/k0sctl/analytics" + "github.com/k0sproject/k0sctl/action" "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" - "github.com/k0sproject/rig/exec" - - osexec "os/exec" - "github.com/mattn/go-isatty" "github.com/urfave/cli/v2" ) -func shellEditor() (string, error) { - if v := os.Getenv("VISUAL"); v != "" { - return 
v, nil - } - if v := os.Getenv("EDITOR"); v != "" { - return v, nil - } - if path, err := osexec.LookPath("vi"); err == nil { - return path, nil - } - - return "", fmt.Errorf("could not detect shell editor ($VISUAL, $EDITOR)") -} - var configEditCommand = &cli.Command{ Name: "edit", Usage: "Edit k0s dynamic config in SHELL's default editor", @@ -42,70 +21,14 @@ var configEditCommand = &cli.Command{ Before: actions(initLogging, startCheckUpgrade, initConfig, initAnalytics), After: actions(reportCheckUpgrade, closeAnalytics), Action: func(ctx *cli.Context) error { - if !isatty.IsTerminal(os.Stdout.Fd()) { - return fmt.Errorf("output is not a terminal") - } - - analytics.Client.Publish("config-edit-start", map[string]interface{}{}) - - editor, err := shellEditor() - if err != nil { - return err - } - - c := ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster) - h := c.Spec.K0sLeader() - - if err := h.Connect(); err != nil { - return fmt.Errorf("failed to connect: %w", err) - } - defer h.Disconnect() - - if err := h.ResolveConfigurer(); err != nil { - return err - } - - oldCfg, err := h.ExecOutput(h.Configurer.K0sCmdf("kubectl --data-dir=%s -n kube-system get clusterconfig k0s -o yaml", h.K0sDataDir()), exec.Sudo(h)) - if err != nil { - return fmt.Errorf("%s: %w", h, err) - } - - tmpFile, err := os.CreateTemp("", "k0s-config.*.yaml") - if err != nil { - return err - } - defer func() { _ = os.Remove(tmpFile.Name()) }() - - if _, err := tmpFile.WriteString(oldCfg); err != nil { - return err - } - - if err := tmpFile.Close(); err != nil { - return err - } - - cmd := osexec.Command(editor, tmpFile.Name()) - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to start editor (%s): %w", cmd.String(), err) - } - - newCfgBytes, err := os.ReadFile(tmpFile.Name()) - if err != nil { - return err - } - newCfg := string(newCfgBytes) - - if newCfg == oldCfg { - return fmt.Errorf("configuration was not 
changed, aborting") - } - - if err := h.Exec(h.Configurer.K0sCmdf("kubectl apply --data-dir=%s -n kube-system -f -", h.K0sDataDir()), exec.Stdin(newCfg), exec.Sudo(h)); err != nil { - return err + configEditAction := action.ConfigEdit{ + Config: ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster), + Concurrency: ctx.Int("concurrency"), + Stdout: ctx.App.Writer, + Stderr: ctx.App.ErrWriter, + Stdin: ctx.App.Reader, } - return nil + return configEditAction.Run() }, } From 9c7f709d7ef235107678358255dee1e618d9ea1a Mon Sep 17 00:00:00 2001 From: Kimmo Lehto Date: Thu, 15 Jun 2023 14:40:40 +0300 Subject: [PATCH 6/6] Extract logic from cmd/config_status into action.ConfigStatus Signed-off-by: Kimmo Lehto --- action/apply.go | 53 +++++++++++++---------------------------- action/backup.go | 20 +++++----------- action/config_edit.go | 9 ++++--- action/config_status.go | 44 ++++++++++++++++++++++++++++++++++ action/kubeconfig.go | 20 ++++------------ action/reset.go | 26 +++++++++----------- cmd/apply.go | 40 ++++++++++++++++--------------- cmd/backup.go | 25 +++++++------------ cmd/config_edit.go | 9 ++++--- cmd/config_status.go | 33 +++++-------------------- cmd/flags.go | 30 +++++++++++++++++++---- cmd/kubeconfig.go | 19 ++++----------- cmd/reset.go | 18 +++++++++----- phase/manager.go | 11 +++++++++ 14 files changed, 180 insertions(+), 177 deletions(-) create mode 100644 action/config_status.go diff --git a/action/apply.go b/action/apply.go index cf697fa4..984b8e08 100644 --- a/action/apply.go +++ b/action/apply.go @@ -2,23 +2,18 @@ package action import ( "fmt" - "os" + "io" "time" "github.com/k0sproject/k0sctl/analytics" "github.com/k0sproject/k0sctl/phase" - "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" log "github.com/sirupsen/logrus" ) type Apply struct { - // Config is the k0sctl config - Config *v1beta1.Cluster - // Concurrency is the number of concurrent actions to run - Concurrency int - // ConcurrentUploads is the number of concurrent 
uploads to run - ConcurrentUploads int + // Manager is the phase manager + Manager *phase.Manager // DisableDowngradeCheck skips the downgrade check DisableDowngradeCheck bool // Force allows forced installation in case of certain failures @@ -29,28 +24,21 @@ type Apply struct { NoDrain bool // RestoreFrom is the path to a cluster backup archive to restore the state from RestoreFrom string - // KubeconfigOut is the path to write the kubeconfig to - KubeconfigOut string + // KubeconfigOut is a writer to write the kubeconfig to + KubeconfigOut io.Writer // KubeconfigAPIAddress is the API address to use in the kubeconfig KubeconfigAPIAddress string - // LogFile is the path where log will be found from - LogFile *os.File } func (a Apply) Run() error { start := time.Now() - if a.Config == nil { - return fmt.Errorf("config is nil") - } - phase.NoWait = a.NoWait phase.Force = a.Force - manager := phase.Manager{Config: a.Config, Concurrency: a.Concurrency, ConcurrentUploads: a.ConcurrentUploads} lockPhase := &phase.Lock{} - manager.AddPhase( + a.Manager.AddPhase( &phase.Connect{}, &phase.DetectOS{}, lockPhase, @@ -80,13 +68,11 @@ func (a Apply) Run() error { &phase.RunHooks{Stage: "after", Action: "apply"}, ) - var kubeCfgPhase *phase.GetKubeconfig - if a.KubeconfigOut != "" { - kubeCfgPhase = &phase.GetKubeconfig{APIAddress: a.KubeconfigAPIAddress} - manager.AddPhase(kubeCfgPhase) + if a.KubeconfigOut != nil { + a.Manager.AddPhase(&phase.GetKubeconfig{APIAddress: a.KubeconfigAPIAddress}) } - manager.AddPhase( + a.Manager.AddPhase( &phase.Unlock{Cancel: lockPhase.Cancel}, &phase.Disconnect{}, ) @@ -95,20 +81,15 @@ func (a Apply) Run() error { var result error - if result = manager.Run(); result != nil { - analytics.Client.Publish("apply-failure", map[string]interface{}{"clusterID": manager.Config.Spec.K0s.Metadata.ClusterID}) - if lf := a.LogFile; lf != nil { - log.Errorf("apply failed - log file saved to %s", lf.Name()) - } + if result = a.Manager.Run(); result != nil { + 
analytics.Client.Publish("apply-failure", map[string]interface{}{"clusterID": a.Manager.Config.Spec.K0s.Metadata.ClusterID}) return result } - analytics.Client.Publish("apply-success", map[string]interface{}{"duration": time.Since(start), "clusterID": manager.Config.Spec.K0s.Metadata.ClusterID}) - if a.KubeconfigOut != "" { - if err := os.WriteFile(a.KubeconfigOut, []byte(manager.Config.Metadata.Kubeconfig), 0644); err != nil { + analytics.Client.Publish("apply-success", map[string]interface{}{"duration": time.Since(start), "clusterID": a.Manager.Config.Spec.K0s.Metadata.ClusterID}) + if a.KubeconfigOut != nil { + if _, err := a.KubeconfigOut.Write([]byte(a.Manager.Config.Metadata.Kubeconfig)); err != nil { log.Warnf("failed to write kubeconfig to %s: %v", a.KubeconfigOut, err) - } else { - log.Infof("kubeconfig written to %s", a.KubeconfigOut) } } @@ -117,7 +98,7 @@ func (a Apply) Run() error { log.Infof(phase.Colorize.Green(text).String()) uninstalled := false - for _, host := range manager.Config.Spec.Hosts { + for _, host := range a.Manager.Config.Spec.Hosts { if host.Reset { uninstalled = true } @@ -126,9 +107,9 @@ func (a Apply) Run() error { log.Info("There were nodes that got uninstalled during the apply phase. 
Please remove them from your k0sctl config file") } - log.Infof("k0s cluster version %s is now installed", manager.Config.Spec.K0s.Version) + log.Infof("k0s cluster version %s is now installed", a.Manager.Config.Spec.K0s.Version) - if a.KubeconfigOut != "" { + if a.KubeconfigOut != nil { log.Infof("Tip: To access the cluster you can now fetch the admin kubeconfig using:") log.Infof(" " + phase.Colorize.Cyan("k0sctl kubeconfig").String()) } diff --git a/action/backup.go b/action/backup.go index e4656d61..73c26acd 100644 --- a/action/backup.go +++ b/action/backup.go @@ -2,29 +2,24 @@ package action import ( "fmt" - "os" "time" "github.com/k0sproject/k0sctl/analytics" "github.com/k0sproject/k0sctl/phase" - "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" log "github.com/sirupsen/logrus" ) type Backup struct { - Config *v1beta1.Cluster - Concurrency int - ConcurrentUploads int - LogFile *os.File + // Manager is the phase manager + Manager *phase.Manager } func (b Backup) Run() error { start := time.Now() - manager := phase.Manager{Config: b.Config, Concurrency: b.Concurrency} lockPhase := &phase.Lock{} - manager.AddPhase( + b.Manager.AddPhase( &phase.Connect{}, &phase.DetectOS{}, lockPhase, @@ -40,15 +35,12 @@ func (b Backup) Run() error { analytics.Client.Publish("backup-start", map[string]interface{}{}) - if err := manager.Run(); err != nil { - analytics.Client.Publish("backup-failure", map[string]interface{}{"clusterID": manager.Config.Spec.K0s.Metadata.ClusterID}) - if b.LogFile != nil { - log.Errorf("backup failed - log file saved to %s", b.LogFile.Name()) - } + if err := b.Manager.Run(); err != nil { + analytics.Client.Publish("backup-failure", map[string]interface{}{"clusterID": b.Manager.Config.Spec.K0s.Metadata.ClusterID}) return err } - analytics.Client.Publish("backup-success", map[string]interface{}{"duration": time.Since(start), "clusterID": manager.Config.Spec.K0s.Metadata.ClusterID}) + analytics.Client.Publish("backup-success", 
map[string]interface{}{"duration": time.Since(start), "clusterID": b.Manager.Config.Spec.K0s.Metadata.ClusterID}) duration := time.Since(start).Truncate(time.Second) text := fmt.Sprintf("==> Finished in %s", duration) diff --git a/action/config_edit.go b/action/config_edit.go index 2fa3ceb1..c74ab04a 100644 --- a/action/config_edit.go +++ b/action/config_edit.go @@ -15,11 +15,10 @@ import ( ) type ConfigEdit struct { - Config *v1beta1.Cluster - Concurrency int - Stdout io.Writer - Stderr io.Writer - Stdin io.Reader + Config *v1beta1.Cluster + Stdout io.Writer + Stderr io.Writer + Stdin io.Reader } func (c ConfigEdit) Run() error { diff --git a/action/config_status.go b/action/config_status.go new file mode 100644 index 00000000..d7807af8 --- /dev/null +++ b/action/config_status.go @@ -0,0 +1,44 @@ +package action + +import ( + "fmt" + "io" + + "github.com/k0sproject/k0sctl/analytics" + "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" + "github.com/k0sproject/rig/exec" +) + +type ConfigStatus struct { + Config *v1beta1.Cluster + Concurrency int + Format string + Writer io.Writer +} + +func (c ConfigStatus) Run() error { + analytics.Client.Publish("config-status-start", map[string]interface{}{}) + + h := c.Config.Spec.K0sLeader() + + if err := h.Connect(); err != nil { + return fmt.Errorf("failed to connect: %w", err) + } + defer h.Disconnect() + + if err := h.ResolveConfigurer(); err != nil { + return err + } + format := c.Format + if format != "" { + format = "-o " + format + } + + output, err := h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "-n kube-system get event --field-selector involvedObject.name=k0s %s", format), exec.Sudo(h)) + if err != nil { + return fmt.Errorf("%s: %w", h, err) + } + fmt.Fprintln(c.Writer, output) + + return nil +} diff --git a/action/kubeconfig.go b/action/kubeconfig.go index 5a378e4b..63b7c9c4 100644 --- a/action/kubeconfig.go +++ b/action/kubeconfig.go @@ -1,39 +1,29 @@ package action import ( - "fmt" - 
"github.com/k0sproject/k0sctl/phase" - "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster" ) type Kubeconfig struct { - Config *v1beta1.Cluster - Concurrency int + // Manager is the phase manager + Manager *phase.Manager KubeconfigAPIAddress string Kubeconfig string } func (k *Kubeconfig) Run() error { - if k.Config == nil { - return fmt.Errorf("config is nil") - } - - c := k.Config - // Change so that the internal config has only single controller host as we // do not need to connect to all nodes - c.Spec.Hosts = cluster.Hosts{c.Spec.K0sLeader()} - manager := phase.Manager{Config: k.Config, Concurrency: k.Concurrency} + k.Manager.Config.Spec.Hosts = cluster.Hosts{k.Manager.Config.Spec.K0sLeader()} - manager.AddPhase( + k.Manager.AddPhase( &phase.Connect{}, &phase.DetectOS{}, &phase.GetKubeconfig{APIAddress: k.KubeconfigAPIAddress}, &phase.Disconnect{}, ) - return manager.Run() + return k.Manager.Run() } diff --git a/action/reset.go b/action/reset.go index 8bc0909c..3736ecb8 100644 --- a/action/reset.go +++ b/action/reset.go @@ -2,12 +2,12 @@ package action import ( "fmt" + "io" "os" "time" "github.com/k0sproject/k0sctl/analytics" "github.com/k0sproject/k0sctl/phase" - "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" log "github.com/sirupsen/logrus" "github.com/AlecAivazis/survey/v2" @@ -15,18 +15,15 @@ import ( ) type Reset struct { - Config *v1beta1.Cluster - Concurrency int - Force bool + // Manager is the phase manager + Manager *phase.Manager + Stdout io.Writer + Force bool } func (r Reset) Run() error { - if r.Config == nil { - return fmt.Errorf("config is nil") - } - if !r.Force { - if !isatty.IsTerminal(os.Stdout.Fd()) { + if stdoutFile, ok := r.Stdout.(*os.File); ok && !isatty.IsTerminal(stdoutFile.Fd()) { return fmt.Errorf("reset requires --force") } confirmed := false @@ -41,13 +38,12 @@ func (r Reset) Run() error { start := time.Now() - 
manager := phase.Manager{Config: r.Config, Concurrency: r.Concurrency} - for _, h := range r.Config.Spec.Hosts { + for _, h := range r.Manager.Config.Spec.Hosts { h.Reset = true } lockPhase := &phase.Lock{} - manager.AddPhase( + r.Manager.AddPhase( &phase.Connect{}, &phase.DetectOS{}, lockPhase, @@ -71,12 +67,12 @@ func (r Reset) Run() error { analytics.Client.Publish("reset-start", map[string]interface{}{}) - if err := manager.Run(); err != nil { - analytics.Client.Publish("reset-failure", map[string]interface{}{"clusterID": r.Config.Spec.K0s.Metadata.ClusterID}) + if err := r.Manager.Run(); err != nil { + analytics.Client.Publish("reset-failure", map[string]interface{}{"clusterID": r.Manager.Config.Spec.K0s.Metadata.ClusterID}) return err } - analytics.Client.Publish("reset-success", map[string]interface{}{"duration": time.Since(start), "clusterID": r.Config.Spec.K0s.Metadata.ClusterID}) + analytics.Client.Publish("reset-success", map[string]interface{}{"duration": time.Since(start), "clusterID": r.Manager.Config.Spec.K0s.Metadata.ClusterID}) duration := time.Since(start).Truncate(time.Second) text := fmt.Sprintf("==> Finished in %s", duration) diff --git a/cmd/apply.go b/cmd/apply.go index b446f077..3b42a111 100644 --- a/cmd/apply.go +++ b/cmd/apply.go @@ -1,10 +1,12 @@ package cmd import ( + "fmt" + "io" "os" "github.com/k0sproject/k0sctl/action" - "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" + "github.com/k0sproject/k0sctl/phase" "github.com/urfave/cli/v2" ) @@ -55,35 +57,35 @@ var applyCommand = &cli.Command{ analyticsFlag, upgradeCheckFlag, }, - Before: actions(initLogging, startCheckUpgrade, initConfig, displayLogo, initAnalytics, displayCopyright, warnOldCache), + Before: actions(initLogging, startCheckUpgrade, initConfig, initManager, displayLogo, initAnalytics, displayCopyright, warnOldCache), After: actions(reportCheckUpgrade, closeAnalytics), Action: func(ctx *cli.Context) error { + var kubeconfigOut io.Writer - logWriter, err 
:= LogFile() - if err != nil { - return err + if kc := ctx.String("kubeconfig-out"); kc != "" { + out, err := os.OpenFile(kc, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) + if err != nil { + return fmt.Errorf("failed to open kubeconfig-out file: %w", err) + } + defer out.Close() + kubeconfigOut = out } - var lf *os.File - - if l, ok := logWriter.(*os.File); ok && l != nil { - lf = l - } - - applier := action.Apply{ + applyAction := action.Apply{ Force: ctx.Bool("force"), + Manager: ctx.Context.Value(ctxManagerKey{}).(*phase.Manager), + KubeconfigOut: kubeconfigOut, + KubeconfigAPIAddress: ctx.String("kubeconfig-api-address"), NoWait: ctx.Bool("no-wait"), NoDrain: ctx.Bool("no-drain"), - Config: ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster), - Concurrency: ctx.Int("concurrency"), - ConcurrentUploads: ctx.Int("concurrent-uploads"), DisableDowngradeCheck: ctx.Bool("disable-downgrade-check"), - KubeconfigOut: ctx.String("kubeconfig-out"), - KubeconfigAPIAddress: ctx.String("kubeconfig-api-address"), RestoreFrom: ctx.String("restore-from"), - LogFile: lf, } - return applier.Run() + if err := applyAction.Run(); err != nil { + return fmt.Errorf("apply failed - log file saved to %s: %w", ctx.Context.Value(ctxLogFileKey{}).(string), err) + } + + return nil }, } diff --git a/cmd/backup.go b/cmd/backup.go index 0698fede..cbe4b9af 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -1,10 +1,10 @@ package cmd import ( - "os" + "fmt" "github.com/k0sproject/k0sctl/action" - "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" + "github.com/k0sproject/k0sctl/phase" "github.com/urfave/cli/v2" ) @@ -22,26 +22,17 @@ var backupCommand = &cli.Command{ analyticsFlag, upgradeCheckFlag, }, - Before: actions(initLogging, startCheckUpgrade, initConfig, displayLogo, initAnalytics, displayCopyright), + Before: actions(initLogging, startCheckUpgrade, initConfig, initManager, displayLogo, initAnalytics, displayCopyright), After: actions(reportCheckUpgrade, closeAnalytics), Action: 
func(ctx *cli.Context) error { - logWriter, err := LogFile() - if err != nil { - return err - } - - var lf *os.File - - if l, ok := logWriter.(*os.File); ok && l != nil { - lf = l + backupAction := action.Backup{ + Manager: ctx.Context.Value(ctxManagerKey{}).(*phase.Manager), } - backupAction := action.Backup{ - Config: ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster), - Concurrency: ctx.Int("concurrency"), - LogFile: lf, + if err := backupAction.Run(); err != nil { + return fmt.Errorf("backup failed - log file saved to %s: %w", ctx.Context.Value(ctxLogFileKey{}).(string), err) } - return backupAction.Run() + return nil }, } diff --git a/cmd/config_edit.go b/cmd/config_edit.go index c2d1b3d1..a744b864 100644 --- a/cmd/config_edit.go +++ b/cmd/config_edit.go @@ -22,11 +22,10 @@ var configEditCommand = &cli.Command{ After: actions(reportCheckUpgrade, closeAnalytics), Action: func(ctx *cli.Context) error { configEditAction := action.ConfigEdit{ - Config: ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster), - Concurrency: ctx.Int("concurrency"), - Stdout: ctx.App.Writer, - Stderr: ctx.App.ErrWriter, - Stdin: ctx.App.Reader, + Config: ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster), + Stdout: ctx.App.Writer, + Stderr: ctx.App.ErrWriter, + Stdin: ctx.App.Reader, } return configEditAction.Run() diff --git a/cmd/config_status.go b/cmd/config_status.go index 716a5083..079354f8 100644 --- a/cmd/config_status.go +++ b/cmd/config_status.go @@ -1,11 +1,8 @@ package cmd import ( - "fmt" - - "github.com/k0sproject/k0sctl/analytics" + "github.com/k0sproject/k0sctl/action" "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" - "github.com/k0sproject/rig/exec" "github.com/urfave/cli/v2" ) @@ -29,30 +26,12 @@ var configStatusCommand = &cli.Command{ Before: actions(initLogging, startCheckUpgrade, initConfig, initAnalytics), After: actions(reportCheckUpgrade, closeAnalytics), Action: func(ctx *cli.Context) error { - 
analytics.Client.Publish("config-status-start", map[string]interface{}{}) - - c := ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster) - h := c.Spec.K0sLeader() - - if err := h.Connect(); err != nil { - return fmt.Errorf("failed to connect: %w", err) - } - defer h.Disconnect() - - if err := h.ResolveConfigurer(); err != nil { - return err - } - format := ctx.String("output") - if format != "" { - format = "-o " + format - } - - output, err := h.ExecOutput(h.Configurer.K0sCmdf("kubectl --data-dir=%s -n kube-system get event --field-selector involvedObject.name=k0s %s", h.K0sDataDir(), format), exec.Sudo(h)) - if err != nil { - return fmt.Errorf("%s: %w", h, err) + configStatusAction := action.ConfigStatus{ + Config: ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster), + Format: ctx.String("output"), + Writer: ctx.App.Writer, } - fmt.Println(output) - return nil + return configStatusAction.Run() }, } diff --git a/cmd/flags.go b/cmd/flags.go index 4c6a1f48..6a0d0f81 100644 --- a/cmd/flags.go +++ b/cmd/flags.go @@ -30,6 +30,8 @@ import ( ) type ctxConfigKey struct{} +type ctxManagerKey struct{} +type ctxLogFileKey struct{} var ( debugFlag = &cli.BoolFlag{ @@ -212,6 +214,25 @@ func closeAnalytics(_ *cli.Context) error { return nil } +func initManager(ctx *cli.Context) error { + c, ok := ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster) + if c == nil || !ok { + return fmt.Errorf("cluster config not available in context") + } + + manager, err := phase.NewManager(c) + if err != nil { + return fmt.Errorf("failed to initialize phase manager: %w", err) + } + + manager.Concurrency = ctx.Int("concurrency") + manager.ConcurrentUploads = ctx.Int("concurrent-uploads") + + ctx.Context = context.WithValue(ctx.Context, ctxManagerKey{}, manager) + + return nil +} + // initLogging initializes the logger func initLogging(ctx *cli.Context) error { log.SetLevel(log.TraceLevel) @@ -219,7 +240,7 @@ func initLogging(ctx *cli.Context) error { initScreenLogger(logLevelFromCtx(ctx, 
log.InfoLevel)) exec.DisableRedact = ctx.Bool("no-redact") rig.SetLogger(log.StandardLogger()) - return initFileLogger() + return initFileLogger(ctx) } // initSilentLogging initializes the logger in silent mode @@ -230,7 +251,7 @@ func initSilentLogging(ctx *cli.Context) error { exec.DisableRedact = ctx.Bool("no-redact") initScreenLogger(logLevelFromCtx(ctx, log.FatalLevel)) rig.SetLogger(log.StandardLogger()) - return initFileLogger() + return initFileLogger(ctx) } func logLevelFromCtx(ctx *cli.Context, defaultLevel log.Level) log.Level { @@ -247,18 +268,19 @@ func initScreenLogger(lvl log.Level) { log.AddHook(screenLoggerHook(lvl)) } -func initFileLogger() error { +func initFileLogger(ctx *cli.Context) error { lf, err := LogFile() if err != nil { return err } log.AddHook(fileLoggerHook(lf)) + ctx.Context = context.WithValue(ctx.Context, ctxLogFileKey{}, lf.Name()) return nil } const logPath = "k0sctl/k0sctl.log" -func LogFile() (io.Writer, error) { +func LogFile() (*os.File, error) { fn, err := xdg.SearchCacheFile(logPath) if err != nil { fn, err = xdg.CacheFile(logPath) diff --git a/cmd/kubeconfig.go b/cmd/kubeconfig.go index f05f0f20..6004ba47 100644 --- a/cmd/kubeconfig.go +++ b/cmd/kubeconfig.go @@ -5,7 +5,7 @@ import ( "github.com/k0sproject/k0sctl/action" "github.com/k0sproject/k0sctl/analytics" - "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" + "github.com/k0sproject/k0sctl/phase" "github.com/urfave/cli/v2" ) @@ -19,7 +19,6 @@ var kubeconfigCommand = &cli.Command{ Value: "", }, configFlag, - concurrencyFlag, debugFlag, traceFlag, redactFlag, @@ -27,30 +26,22 @@ var kubeconfigCommand = &cli.Command{ retryTimeoutFlag, analyticsFlag, }, - Before: actions(initSilentLogging, initConfig, initAnalytics), + Before: actions(initSilentLogging, initConfig, initManager, initAnalytics), After: func(_ *cli.Context) error { analytics.Client.Close() return nil }, Action: func(ctx *cli.Context) error { - var cfg *v1beta1.Cluster - if c, ok := 
ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster); ok { - cfg = c - } else { - return fmt.Errorf("config is nil") - } - kubeconfigAction := action.Kubeconfig{ - Config: cfg, - Concurrency: ctx.Int("concurrency"), + Manager: ctx.Context.Value(ctxManagerKey{}).(*phase.Manager), KubeconfigAPIAddress: ctx.String("address"), } if err := kubeconfigAction.Run(); err != nil { - return err + return fmt.Errorf("getting kubeconfig failed - log file saved to %s: %w", ctx.Context.Value(ctxLogFileKey{}).(string), err) } - _, err := fmt.Fprintf(ctx.App.Writer, "%s\n", cfg.Metadata.Kubeconfig) + _, err := fmt.Fprintf(ctx.App.Writer, "%s\n", kubeconfigAction.Manager.Config.Metadata.Kubeconfig) return err }, } diff --git a/cmd/reset.go b/cmd/reset.go index 24d19ec8..0af7269d 100644 --- a/cmd/reset.go +++ b/cmd/reset.go @@ -1,8 +1,10 @@ package cmd import ( + "fmt" + "github.com/k0sproject/k0sctl/action" - "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" + "github.com/k0sproject/k0sctl/phase" "github.com/urfave/cli/v2" ) @@ -26,15 +28,19 @@ var resetCommand = &cli.Command{ Aliases: []string{"f"}, }, }, - Before: actions(initLogging, startCheckUpgrade, initConfig, initAnalytics, displayCopyright), + Before: actions(initLogging, startCheckUpgrade, initConfig, initManager, initAnalytics, displayCopyright), After: actions(reportCheckUpgrade, closeAnalytics), Action: func(ctx *cli.Context) error { resetAction := action.Reset{ - Config: ctx.Context.Value(ctxConfigKey{}).(*v1beta1.Cluster), - Concurrency: ctx.Int("concurrency"), - Force: ctx.Bool("force"), + Manager: ctx.Context.Value(ctxManagerKey{}).(*phase.Manager), + Force: ctx.Bool("force"), + Stdout: ctx.App.Writer, + } + + if err := resetAction.Run(); err != nil { + return fmt.Errorf("reset failed - log file saved to %s: %w", ctx.Context.Value(ctxLogFileKey{}).(string), err) } - return resetAction.Run() + return nil }, } diff --git a/phase/manager.go b/phase/manager.go index 12910e51..05de3f8c 100644 --- 
a/phase/manager.go +++ b/phase/manager.go @@ -1,6 +1,8 @@ package phase import ( + "fmt" + "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster" "github.com/logrusorgru/aurora" @@ -59,6 +61,15 @@ type Manager struct { ConcurrentUploads int } +// NewManager creates a new Manager +func NewManager(config *v1beta1.Cluster) (*Manager, error) { + if config == nil { + return nil, fmt.Errorf("config is nil") + } + + return &Manager{Config: config}, nil +} + // AddPhase adds a Phase to Manager func (m *Manager) AddPhase(p ...phase) { m.phases = append(m.phases, p...)