From a6aa06b5e4a7959f5b0f91b93193ac156a0593ea Mon Sep 17 00:00:00 2001
From: Yu Xiang Zhang
Date: Fri, 26 Jan 2024 22:55:06 +0000
Subject: [PATCH] Fix deleting cluster sometimes draining managed nodegroups

---
 pkg/actions/cluster/delete.go       | 3 ++-
 pkg/actions/cluster/delete_test.go  | 6 +++---
 pkg/actions/cluster/owned_test.go   | 4 ++--
 pkg/actions/cluster/unowned_test.go | 4 ++--
 pkg/ctl/cmdutils/cluster.go         | 6 +++---
 pkg/ctl/create/cluster.go           | 2 +-
 pkg/ctl/delete/nodegroup.go         | 2 +-
 pkg/ctl/drain/nodegroup.go          | 2 +-
 8 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/pkg/actions/cluster/delete.go b/pkg/actions/cluster/delete.go
index d3e247d5f7..1579d7a060 100644
--- a/pkg/actions/cluster/delete.go
+++ b/pkg/actions/cluster/delete.go
@@ -178,10 +178,11 @@ func drainAllNodeGroups(ctx context.Context, cfg *api.ClusterConfig, ctl *eks.Cl
 		}
 	}
 
+	// EKS automatically drains managed nodegroups
 	logger.Info("will drain %d unmanaged nodegroup(s) in cluster %q", len(cfg.NodeGroups), cfg.Metadata.Name)
 
 	drainInput := &nodegroup.DrainInput{
-		NodeGroups:            cmdutils.ToKubeNodeGroups(cfg),
+		NodeGroups:            cmdutils.ToKubeNodeGroups(cfg.NodeGroups, []*api.ManagedNodeGroup{}),
 		MaxGracePeriod:        ctl.AWSProvider.WaitTimeout(),
 		DisableEviction:       disableEviction,
 		PodEvictionWaitPeriod: podEvictionWaitPeriod,
diff --git a/pkg/actions/cluster/delete_test.go b/pkg/actions/cluster/delete_test.go
index 011c4dace5..903029b1c6 100644
--- a/pkg/actions/cluster/delete_test.go
+++ b/pkg/actions/cluster/delete_test.go
@@ -59,7 +59,7 @@ var _ = Describe("DrainAllNodeGroups", func() {
 			nodeGroupStacks := []manager.NodeGroupStack{{NodeGroupName: "ng-1"}}
 
 			mockedDrainInput := &nodegroup.DrainInput{
-				NodeGroups:     cmdutils.ToKubeNodeGroups(cfg),
+				NodeGroups:     cmdutils.ToKubeNodeGroups(cfg.NodeGroups, cfg.ManagedNodeGroups),
 				MaxGracePeriod: ctl.AWSProvider.WaitTimeout(),
 				Parallel:       1,
 			}
@@ -87,7 +87,7 @@ var _ = Describe("DrainAllNodeGroups", func() {
 			nodeGroupStacks := []manager.NodeGroupStack{{NodeGroupName: "ng-1"}}
 
 			mockedDrainInput := &nodegroup.DrainInput{
-				NodeGroups:      cmdutils.ToKubeNodeGroups(cfg),
+				NodeGroups:      cmdutils.ToKubeNodeGroups(cfg.NodeGroups, cfg.ManagedNodeGroups),
 				MaxGracePeriod:  ctl.AWSProvider.WaitTimeout(),
 				DisableEviction: true,
 				Parallel:        1,
@@ -116,7 +116,7 @@ var _ = Describe("DrainAllNodeGroups", func() {
 			var nodeGroupStacks []manager.NodeGroupStack
 
 			mockedDrainInput := &nodegroup.DrainInput{
-				NodeGroups:     cmdutils.ToKubeNodeGroups(cfg),
+				NodeGroups:     cmdutils.ToKubeNodeGroups(cfg.NodeGroups, cfg.ManagedNodeGroups),
 				MaxGracePeriod: ctl.AWSProvider.WaitTimeout(),
 				Parallel:       1,
 			}
diff --git a/pkg/actions/cluster/owned_test.go b/pkg/actions/cluster/owned_test.go
index 9642eeebdf..a84fcefb22 100644
--- a/pkg/actions/cluster/owned_test.go
+++ b/pkg/actions/cluster/owned_test.go
@@ -188,7 +188,7 @@ var _ = Describe("Delete", func() {
 				})
 
 				mockedDrainInput := &nodegroup.DrainInput{
-					NodeGroups:     cmdutils.ToKubeNodeGroups(cfg),
+					NodeGroups:     cmdutils.ToKubeNodeGroups(cfg.NodeGroups, cfg.ManagedNodeGroups),
 					MaxGracePeriod: ctl.AWSProvider.WaitTimeout(),
 					Parallel:       1,
 				}
@@ -253,7 +253,7 @@ var _ = Describe("Delete", func() {
 				})
 
 				mockedDrainInput := &nodegroup.DrainInput{
-					NodeGroups:     cmdutils.ToKubeNodeGroups(cfg),
+					NodeGroups:     cmdutils.ToKubeNodeGroups(cfg.NodeGroups, cfg.ManagedNodeGroups),
 					MaxGracePeriod: ctl.AWSProvider.WaitTimeout(),
 					Parallel:       1,
 				}
diff --git a/pkg/actions/cluster/unowned_test.go b/pkg/actions/cluster/unowned_test.go
index 8d60f856b7..53a13ce409 100644
--- a/pkg/actions/cluster/unowned_test.go
+++ b/pkg/actions/cluster/unowned_test.go
@@ -246,7 +246,7 @@ var _ = Describe("Delete", func() {
 			})
 
 			mockedDrainInput := &nodegroup.DrainInput{
-				NodeGroups:     cmdutils.ToKubeNodeGroups(cfg),
+				NodeGroups:     cmdutils.ToKubeNodeGroups(cfg.NodeGroups, cfg.ManagedNodeGroups),
 				MaxGracePeriod: ctl.AWSProvider.WaitTimeout(),
 				Parallel:       1,
 			}
@@ -348,7 +348,7 @@ var _ = Describe("Delete", func() {
 				},
 			}
 			mockedDrainInput := &nodegroup.DrainInput{
-				NodeGroups:     cmdutils.ToKubeNodeGroups(cfg),
+				NodeGroups:     cmdutils.ToKubeNodeGroups(cfg.NodeGroups, cfg.ManagedNodeGroups),
 				MaxGracePeriod: ctl.AWSProvider.WaitTimeout(),
 				Parallel:       1,
 			}
diff --git a/pkg/ctl/cmdutils/cluster.go b/pkg/ctl/cmdutils/cluster.go
index 11ac469ccd..1c366655fe 100644
--- a/pkg/ctl/cmdutils/cluster.go
+++ b/pkg/ctl/cmdutils/cluster.go
@@ -34,12 +34,12 @@ func ApplyFilter(clusterConfig *api.ClusterConfig, ngFilter filter.NodegroupFilt
 
 // ToKubeNodeGroups combines managed and unmanaged nodegroups and returns a slice of eks.KubeNodeGroup containing
 // both types of nodegroups
-func ToKubeNodeGroups(clusterConfig *api.ClusterConfig) []eks.KubeNodeGroup {
+func ToKubeNodeGroups(unmanagedNodeGroups []*api.NodeGroup, managedNodeGroups []*api.ManagedNodeGroup) []eks.KubeNodeGroup {
 	var kubeNodeGroups []eks.KubeNodeGroup
-	for _, ng := range clusterConfig.NodeGroups {
+	for _, ng := range unmanagedNodeGroups {
 		kubeNodeGroups = append(kubeNodeGroups, ng)
 	}
-	for _, ng := range clusterConfig.ManagedNodeGroups {
+	for _, ng := range managedNodeGroups {
 		kubeNodeGroups = append(kubeNodeGroups, ng)
 	}
 	return kubeNodeGroups
diff --git a/pkg/ctl/create/cluster.go b/pkg/ctl/create/cluster.go
index 036fcd39e4..c94a6cbb2d 100644
--- a/pkg/ctl/create/cluster.go
+++ b/pkg/ctl/create/cluster.go
@@ -239,7 +239,7 @@ func doCreateCluster(cmd *cmdutils.Cmd, ngFilter *filter.NodeGroupFilter, params
 		}
 	}
 	logFiltered := cmdutils.ApplyFilter(cfg, ngFilter)
-	kubeNodeGroups := cmdutils.ToKubeNodeGroups(cfg)
+	kubeNodeGroups := cmdutils.ToKubeNodeGroups(cfg.NodeGroups, cfg.ManagedNodeGroups)
 
 	// Check if flux binary exists early in the process, so it doesn't fail at the end when the cluster
 	// has already been created with a missing flux binary error which should have been caught earlier.
diff --git a/pkg/ctl/delete/nodegroup.go b/pkg/ctl/delete/nodegroup.go
index 78ecf942a8..dd86296639 100644
--- a/pkg/ctl/delete/nodegroup.go
+++ b/pkg/ctl/delete/nodegroup.go
@@ -136,7 +136,7 @@ func doDeleteNodeGroup(cmd *cmdutils.Cmd, ng *api.NodeGroup, options deleteNodeG
 			}
 		}
 	}
-	allNodeGroups := cmdutils.ToKubeNodeGroups(cfg)
+	allNodeGroups := cmdutils.ToKubeNodeGroups(cfg.NodeGroups, cfg.ManagedNodeGroups)
 
 	if options.deleteNodeGroupDrain {
 		cmdutils.LogIntendedAction(cmd.Plan, "drain %d nodegroup(s) in cluster %q", len(allNodeGroups), cfg.Metadata.Name)
diff --git a/pkg/ctl/drain/nodegroup.go b/pkg/ctl/drain/nodegroup.go
index c03113ae31..0dbe9badc1 100644
--- a/pkg/ctl/drain/nodegroup.go
+++ b/pkg/ctl/drain/nodegroup.go
@@ -127,7 +127,7 @@ func doDrainNodeGroup(cmd *cmdutils.Cmd, ng *api.NodeGroup, undo, onlyMissing bo
 	if cmd.Plan {
 		return nil
 	}
-	allNodeGroups := cmdutils.ToKubeNodeGroups(cfg)
+	allNodeGroups := cmdutils.ToKubeNodeGroups(cfg.NodeGroups, cfg.ManagedNodeGroups)
 
 	drainInput := &nodegroup.DrainInput{
 		NodeGroups:            allNodeGroups,
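Note on the shape of the fix: the patch moves the decision about which nodegroups to drain out of ToKubeNodeGroups and into its callers. Cluster deletion passes an empty managed slice, since EKS drains managed nodegroups itself, while the create, delete-nodegroup, and drain commands keep the old behaviour by passing both slices from the config. The standalone Go sketch below illustrates that pattern in isolation; the NodeGroup, ManagedNodeGroup, and one-method KubeNodeGroup types are simplified stand-ins for eksctl's api and eks types, not the real definitions.

package main

import "fmt"

// Simplified stand-ins for api.NodeGroup, api.ManagedNodeGroup, and
// eks.KubeNodeGroup (assumed shapes, for illustration only).
type NodeGroup struct{ Name string }
type ManagedNodeGroup struct{ Name string }

type KubeNodeGroup interface {
	NameString() string
}

func (n *NodeGroup) NameString() string        { return n.Name }
func (n *ManagedNodeGroup) NameString() string { return n.Name }

// toKubeNodeGroups mirrors the patched cmdutils.ToKubeNodeGroups: callers
// now supply the two slices explicitly instead of a whole cluster config.
func toKubeNodeGroups(unmanaged []*NodeGroup, managed []*ManagedNodeGroup) []KubeNodeGroup {
	var kubeNodeGroups []KubeNodeGroup
	for _, ng := range unmanaged {
		kubeNodeGroups = append(kubeNodeGroups, ng)
	}
	for _, ng := range managed {
		kubeNodeGroups = append(kubeNodeGroups, ng)
	}
	return kubeNodeGroups
}

func main() {
	unmanaged := []*NodeGroup{{Name: "ng-unmanaged"}}
	managed := []*ManagedNodeGroup{{Name: "ng-managed"}}

	// Cluster deletion: EKS drains managed nodegroups itself, so only the
	// unmanaged ones are handed to the drainer (the delete.go hunk above).
	for _, ng := range toKubeNodeGroups(unmanaged, []*ManagedNodeGroup{}) {
		fmt.Println("cluster delete drains:", ng.NameString()) // ng-unmanaged only
	}

	// All other callers keep the old behaviour and drain both kinds.
	for _, ng := range toKubeNodeGroups(unmanaged, managed) {
		fmt.Println("nodegroup drain/delete drains:", ng.NameString())
	}
}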