From ff6c04938df551e241c1f6ae66a95f4b13ff2c76 Mon Sep 17 00:00:00 2001 From: Ole Markus With Date: Wed, 19 Aug 2020 11:07:52 +0200 Subject: [PATCH] Add kops delete instance command Add support for deleting instance by k8s node name Add yes flag --- cmd/kops/BUILD.bazel | 1 + cmd/kops/delete.go | 8 +- cmd/kops/delete_instance.go | 289 +++++++++++++++++++++++++ docs/cli/kops.md | 2 +- docs/cli/kops_delete.md | 8 +- docs/cli/kops_delete_cluster.md | 2 +- docs/cli/kops_delete_instance.md | 67 ++++++ docs/cli/kops_delete_instancegroup.md | 2 +- docs/cli/kops_delete_secret.md | 2 +- docs/releases/1.19-NOTES.md | 2 + pkg/instancegroups/instancegroups.go | 28 ++- pkg/instancegroups/rollingupdate.go | 6 +- upup/pkg/fi/cloudup/awsup/aws_cloud.go | 3 + 13 files changed, 406 insertions(+), 14 deletions(-) create mode 100644 cmd/kops/delete_instance.go create mode 100644 docs/cli/kops_delete_instance.md diff --git a/cmd/kops/BUILD.bazel b/cmd/kops/BUILD.bazel index bdf0004b2ff1b..bf658890cd78d 100644 --- a/cmd/kops/BUILD.bazel +++ b/cmd/kops/BUILD.bazel @@ -17,6 +17,7 @@ go_library( "create_secret_weave_encryptionconfig.go", "delete.go", "delete_cluster.go", + "delete_instance.go", "delete_instancegroup.go", "delete_secret.go", "describe.go", diff --git a/cmd/kops/delete.go b/cmd/kops/delete.go index 1e2d06775b5df..7a2953797933d 100644 --- a/cmd/kops/delete.go +++ b/cmd/kops/delete.go @@ -42,10 +42,13 @@ type DeleteOptions struct { var ( deleteLong = templates.LongDesc(i18n.T(` - Delete Kubernetes clusters, instancegroups, and secrets, or a combination of the before mentioned. + Delete Kubernetes clusters, instancegroups, instances, and secrets, or a combination of the before mentioned. 
`)) deleteExample = templates.Examples(i18n.T(` + # Delete an instance + kops delete instance i-0a5ed581b862d3425 + # Delete a cluster using a manifest file kops delete -f my-cluster.yaml @@ -60,7 +63,7 @@ var ( kops delete ig --name=k8s-cluster.example.com node-example --yes `)) - deleteShort = i18n.T("Delete clusters,instancegroups, or secrets.") + deleteShort = i18n.T("Delete clusters,instancegroups, instances, or secrets.") ) func NewCmdDelete(f *util.Factory, out io.Writer) *cobra.Command { @@ -90,6 +93,7 @@ func NewCmdDelete(f *util.Factory, out io.Writer) *cobra.Command { cmd.AddCommand(NewCmdDeleteCluster(f, out)) cmd.AddCommand(NewCmdDeleteInstanceGroup(f, out)) cmd.AddCommand(NewCmdDeleteSecret(f, out)) + cmd.AddCommand(NewCmdDeleteInstance(f, out)) return cmd } diff --git a/cmd/kops/delete_instance.go b/cmd/kops/delete_instance.go new file mode 100644 index 0000000000000..8a5b7ea7727db --- /dev/null +++ b/cmd/kops/delete_instance.go @@ -0,0 +1,289 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "context" + "fmt" + "io" + "os" + "time" + + "github.com/spf13/cobra" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/kubernetes" + _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/kops/cmd/kops/util" + kopsapi "k8s.io/kops/pkg/apis/kops" + "k8s.io/kops/pkg/cloudinstances" + "k8s.io/kops/pkg/instancegroups" + "k8s.io/kops/pkg/validation" + "k8s.io/kops/upup/pkg/fi/cloudup" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +// DeleteInstanceOptions is the command Object for an instance deletion. +type deleteInstanceOptions struct { + Yes bool + CloudOnly bool + + // The following two variables are when kops is validating a cluster + // between detach and deletion. + + // FailOnDrainError fail deletion if drain errors. + FailOnDrainError bool + + // FailOnValidate fail the deletion when the cluster + // does not validate, after a validation period. + FailOnValidate bool + + // PostDrainDelay is the duration of a pause after a drain operation + PostDrainDelay time.Duration + + // ValidationTimeout is the timeout for validation to succeed after the drain and pause + ValidationTimeout time.Duration + + // ValidateCount is the amount of time that a cluster needs to be validated between drain and deletion + ValidateCount int32 + + ClusterName string + + InstanceID string + + Surge bool +} + +func (o *deleteInstanceOptions) initDefaults() { + d := &RollingUpdateOptions{} + d.InitDefaults() + + o.CloudOnly = false + o.FailOnDrainError = false + o.FailOnValidate = true + + o.PostDrainDelay = d.PostDrainDelay + o.ValidationTimeout = d.ValidationTimeout + o.ValidateCount = d.ValidateCount + + o.Surge = true +} + +func NewCmdDeleteInstance(f *util.Factory, out io.Writer) *cobra.Command { + deleteInstanceLong := templates.LongDesc(i18n.T(` + Delete an instance. 
By default, it will detach the instance from + the instance group, drain it, then terminate it.`)) + + deleteInstanceExample := templates.Examples(i18n.T(` + # Delete an instance from the currently active cluster. + kops delete instance i-0a5ed581b862d3425 --yes + + # Delete an instance from the currently active cluster using node name. + kops delete instance ip-xx.xx.xx.xx.ec2.internal --yes + + # Delete an instance from the currently active cluster without + validation or draining. + kops delete instance --cloudonly i-0a5ed581b862d3425 --yes + `)) + + deleteInstanceShort := i18n.T(`Delete an instance`) + + var options deleteInstanceOptions + options.initDefaults() + + cmd := &cobra.Command{ + Use: "instance", + Short: deleteInstanceShort, + Long: deleteInstanceLong, + Example: deleteInstanceExample, + } + + cmd.Flags().BoolVar(&options.CloudOnly, "cloudonly", options.CloudOnly, "Perform deletion update without confirming progress with k8s") + cmd.Flags().BoolVar(&options.Surge, "surge", options.Surge, "Surge by detaching the node from the ASG before deletion") + + cmd.Flags().DurationVar(&options.ValidationTimeout, "validation-timeout", options.ValidationTimeout, "Maximum time to wait for a cluster to validate") + cmd.Flags().Int32Var(&options.ValidateCount, "validate-count", options.ValidateCount, "Amount of times that a cluster needs to be validated after single node update") + cmd.Flags().DurationVar(&options.PostDrainDelay, "post-drain-delay", options.PostDrainDelay, "Time to wait after draining each node") + + cmd.Flags().BoolVar(&options.FailOnDrainError, "fail-on-drain-error", true, "The deletion will fail if draining a node fails.") + cmd.Flags().BoolVar(&options.FailOnValidate, "fail-on-validate-error", true, "The deletion will fail if the cluster fails to validate.") + + cmd.Flags().BoolVarP(&options.Yes, "yes", "y", options.Yes, "Specify --yes to immediately delete the instance") + + cmd.Run = func(cmd *cobra.Command, args []string) { + ctx := 
context.TODO() + + clusterName := rootCommand.ClusterName() + + if clusterName == "" { + exitWithError(fmt.Errorf("--name is required")) + return + } + + options.ClusterName = clusterName + if len(args) == 0 { + exitWithError(fmt.Errorf("specify ID of instance to delete")) + } + if len(args) != 1 { + exitWithError(fmt.Errorf("can only delete one instance at a time")) + } + + options.InstanceID = args[0] + + err := RunDeleteInstance(ctx, f, os.Stdout, &options) + if err != nil { + exitWithError(err) + return + } + + } + + return cmd +} + +func RunDeleteInstance(ctx context.Context, f *util.Factory, out io.Writer, options *deleteInstanceOptions) error { + + clientset, err := f.Clientset() + if err != nil { + return err + } + + cluster, err := GetCluster(ctx, f, options.ClusterName) + if err != nil { + return err + } + + contextName := cluster.ObjectMeta.Name + clientGetter := genericclioptions.NewConfigFlags(true) + clientGetter.Context = &contextName + + config, err := clientGetter.ToRESTConfig() + if err != nil { + return fmt.Errorf("cannot load kubecfg settings for %q: %v", contextName, err) + } + + var nodes []v1.Node + var k8sClient kubernetes.Interface + if !options.CloudOnly { + k8sClient, err = kubernetes.NewForConfig(config) + if err != nil { + return fmt.Errorf("cannot build kube client for %q: %v", contextName, err) + } + + nodeList, err := k8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to reach the kubernetes API.\n") + fmt.Fprintf(os.Stderr, "Use --cloudonly to do a deletion without confirming progress with the k8s API\n\n") + return fmt.Errorf("error listing nodes in cluster: %v", err) + } + + if nodeList != nil { + nodes = nodeList.Items + } + } + + list, err := clientset.InstanceGroupsFor(cluster).List(ctx, metav1.ListOptions{}) + if err != nil { + return err + } + + var instanceGroups []*kopsapi.InstanceGroup + for i := range list.Items { + instanceGroups = append(instanceGroups, 
&list.Items[i])
+	}
+
+	cloud, err := cloudup.BuildCloud(cluster)
+	if err != nil {
+		return err
+	}
+
+	groups, err := cloud.GetCloudGroups(cluster, instanceGroups, false, nodes)
+	if err != nil {
+		return err
+	}
+
+	cloudMember := findDeletionNode(groups, options)
+
+	if cloudMember == nil {
+		return fmt.Errorf("could not find instance %v", options.InstanceID)
+	}
+
+	if options.CloudOnly {
+		fmt.Fprintf(out, "Instance %v found for deletion\n", cloudMember.ID)
+	} else {
+		fmt.Fprintf(out, "Instance %v (%v) found for deletion\n", cloudMember.ID, cloudMember.Node.Name)
+	}
+
+	if !options.Yes {
+		fmt.Fprintf(out, "\nMust specify --yes to delete instance\n")
+		return nil
+	}
+
+	d := &instancegroups.RollingUpdateCluster{
+		MasterInterval:    0,
+		NodeInterval:      0,
+		BastionInterval:   0,
+		Interactive:       false,
+		Force:             true,
+		Cloud:             cloud,
+		K8sClient:         k8sClient,
+		FailOnDrainError:  options.FailOnDrainError,
+		FailOnValidate:    options.FailOnValidate,
+		CloudOnly:         options.CloudOnly,
+		ClusterName:       options.ClusterName,
+		PostDrainDelay:    options.PostDrainDelay,
+		ValidationTimeout: options.ValidationTimeout,
+		ValidateCount:     int(options.ValidateCount),
+		// TODO should we expose this to the UI?
+ ValidateTickDuration: 30 * time.Second, + ValidateSuccessDuration: 10 * time.Second, + } + + var clusterValidator validation.ClusterValidator + if !options.CloudOnly { + clusterValidator, err = validation.NewClusterValidator(cluster, cloud, list, k8sClient) + if err != nil { + return fmt.Errorf("cannot create cluster validator: %v", err) + } + } + d.ClusterValidator = clusterValidator + + return d.UpdateSingleInstance(ctx, cloudMember, options.Surge) +} + +func deleteNodeMatch(cloudMember *cloudinstances.CloudInstanceGroupMember, options *deleteInstanceOptions) bool { + return cloudMember.ID == options.InstanceID || + (!options.CloudOnly && cloudMember.Node != nil && cloudMember.Node.Name == options.InstanceID) +} + +func findDeletionNode(groups map[string]*cloudinstances.CloudInstanceGroup, options *deleteInstanceOptions) *cloudinstances.CloudInstanceGroupMember { + for _, group := range groups { + for _, r := range group.Ready { + if deleteNodeMatch(r, options) { + return r + } + } + for _, r := range group.NeedUpdate { + if deleteNodeMatch(r, options) { + return r + } + } + } + return nil +} diff --git a/docs/cli/kops.md b/docs/cli/kops.md index dab11089ff6b1..450c08a9b1d4b 100644 --- a/docs/cli/kops.md +++ b/docs/cli/kops.md @@ -38,7 +38,7 @@ kops is Kubernetes ops. * [kops completion](kops_completion.md) - Output shell completion code for the given shell (bash or zsh). * [kops create](kops_create.md) - Create a resource by command line, filename or stdin. -* [kops delete](kops_delete.md) - Delete clusters,instancegroups, or secrets. +* [kops delete](kops_delete.md) - Delete clusters,instancegroups, instances, or secrets. * [kops describe](kops_describe.md) - Describe a resource. * [kops edit](kops_edit.md) - Edit clusters and other resources. * [kops export](kops_export.md) - Export configuration. 
diff --git a/docs/cli/kops_delete.md b/docs/cli/kops_delete.md index 0a3bfc6a71ec8..602113473f361 100644 --- a/docs/cli/kops_delete.md +++ b/docs/cli/kops_delete.md @@ -3,11 +3,11 @@ ## kops delete -Delete clusters,instancegroups, or secrets. +Delete clusters,instancegroups, instances, or secrets. ### Synopsis -Delete Kubernetes clusters, instancegroups, and secrets, or a combination of the before mentioned. +Delete Kubernetes clusters, instancegroups, instances, and secrets, or a combination of the before mentioned. ``` kops delete -f FILENAME [--yes] [flags] @@ -16,6 +16,9 @@ kops delete -f FILENAME [--yes] [flags] ### Examples ``` + # Delete an instance + kops delete instance i-0a5ed581b862d3425 + # Delete a cluster using a manifest file kops delete -f my-cluster.yaml @@ -62,6 +65,7 @@ kops delete -f FILENAME [--yes] [flags] * [kops](kops.md) - kops is Kubernetes ops. * [kops delete cluster](kops_delete_cluster.md) - Delete a cluster. +* [kops delete instance](kops_delete_instance.md) - Delete an instance * [kops delete instancegroup](kops_delete_instancegroup.md) - Delete instancegroup * [kops delete secret](kops_delete_secret.md) - Delete a secret diff --git a/docs/cli/kops_delete_cluster.md b/docs/cli/kops_delete_cluster.md index 8c0bd4fe50d07..26de73dc6debb 100644 --- a/docs/cli/kops_delete_cluster.md +++ b/docs/cli/kops_delete_cluster.md @@ -53,5 +53,5 @@ kops delete cluster CLUSTERNAME [--yes] [flags] ### SEE ALSO -* [kops delete](kops_delete.md) - Delete clusters,instancegroups, or secrets. +* [kops delete](kops_delete.md) - Delete clusters,instancegroups, instances, or secrets. diff --git a/docs/cli/kops_delete_instance.md b/docs/cli/kops_delete_instance.md new file mode 100644 index 0000000000000..a70d73586e6f7 --- /dev/null +++ b/docs/cli/kops_delete_instance.md @@ -0,0 +1,67 @@ + + + +## kops delete instance + +Delete an instance + +### Synopsis + +Delete an instance. 
By default, it will detach the instance from the instance group, drain it, then terminate it. + +``` +kops delete instance [flags] +``` + +### Examples + +``` + # Delete an instance from the currently active cluster. + kops delete instance i-0a5ed581b862d3425 --yes + + # Delete an instance from the currently active cluster using node name. + kops delete instance ip-xx.xx.xx.xx.ec2.internal --yes + + # Delete an instance from the currently active cluster without + validation or draining. + kops delete instance --cloudonly i-0a5ed581b862d3425 --yes +``` + +### Options + +``` + --cloudonly Perform deletion update without confirming progress with k8s + --fail-on-drain-error The deletion will fail if draining a node fails. (default true) + --fail-on-validate-error The deletion will fail if the cluster fails to validate. (default true) + -h, --help help for instance + --post-drain-delay duration Time to wait after draining each node (default 5s) + --surge Surge by detaching the node from the ASG before deletion (default true) + --validate-count int32 Amount of times that a cluster needs to be validated after single node update (default 2) + --validation-timeout duration Maximum time to wait for a cluster to validate (default 15m0s) + -y, --yes Specify --yes to immediately delete the instance +``` + +### Options inherited from parent commands + +``` + --add_dir_header If true, adds the file directory to the header of the log messages + --alsologtostderr log to standard error as well as files + --config string yaml config file (default is $HOME/.kops.yaml) + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --log_file string If non-empty, use this log file + --log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. 
(default 1800) + --logtostderr log to standard error instead of files (default true) + --name string Name of cluster. Overrides KOPS_CLUSTER_NAME environment variable + --skip_headers If true, avoid header prefixes in the log messages + --skip_log_headers If true, avoid headers when opening log files + --state string Location of state storage (kops 'config' file). Overrides KOPS_STATE_STORE environment variable + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level number for the log level verbosity + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +``` + +### SEE ALSO + +* [kops delete](kops_delete.md) - Delete clusters,instancegroups, instances, or secrets. + diff --git a/docs/cli/kops_delete_instancegroup.md b/docs/cli/kops_delete_instancegroup.md index 69bcbf71e7fe5..383e86c6e0e93 100644 --- a/docs/cli/kops_delete_instancegroup.md +++ b/docs/cli/kops_delete_instancegroup.md @@ -51,5 +51,5 @@ kops delete instancegroup [flags] ### SEE ALSO -* [kops delete](kops_delete.md) - Delete clusters,instancegroups, or secrets. +* [kops delete](kops_delete.md) - Delete clusters,instancegroups, instances, or secrets. diff --git a/docs/cli/kops_delete_secret.md b/docs/cli/kops_delete_secret.md index 398d3965cd23e..5cd97777571d8 100644 --- a/docs/cli/kops_delete_secret.md +++ b/docs/cli/kops_delete_secret.md @@ -49,5 +49,5 @@ kops delete secret [flags] ### SEE ALSO -* [kops delete](kops_delete.md) - Delete clusters,instancegroups, or secrets. +* [kops delete](kops_delete.md) - Delete clusters,instancegroups, instances, or secrets. 
diff --git a/docs/releases/1.19-NOTES.md b/docs/releases/1.19-NOTES.md index c405d80b5e306..02daec0ef8d53 100644 --- a/docs/releases/1.19-NOTES.md +++ b/docs/releases/1.19-NOTES.md @@ -40,6 +40,8 @@ has been updated by a newer version of kops unless it is given the `--allow-kops The certificates on a node will expire sometime between 455 and 485 days after the node's creation. The expiration times vary randomly so that nodes are likely to have their certs expire at different times than other nodes. +* New command for deleting a single instance: [kops delete instance](/docs/cli/kops_delete_instance/) + # Breaking changes * Support for Kubernetes 1.9 and 1.10 has been removed. diff --git a/pkg/instancegroups/instancegroups.go b/pkg/instancegroups/instancegroups.go index 5c108eacd805c..b688733d996c4 100644 --- a/pkg/instancegroups/instancegroups.go +++ b/pkg/instancegroups/instancegroups.go @@ -69,7 +69,8 @@ func promptInteractive(upgradedHostID, upgradedHostName string) (stopPrompting b } // RollingUpdate performs a rolling update on a list of instances. -func (c *RollingUpdateCluster) rollingUpdateInstanceGroup(ctx context.Context, cluster *api.Cluster, group *cloudinstances.CloudInstanceGroup, isBastion bool, sleepAfterTerminate time.Duration) (err error) { +func (c *RollingUpdateCluster) rollingUpdateInstanceGroup(ctx context.Context, cluster *api.Cluster, group *cloudinstances.CloudInstanceGroup, sleepAfterTerminate time.Duration) (err error) { + isBastion := group.InstanceGroup.IsBastion() // Do not need a k8s client if you are doing cloudonly. 
if c.K8sClient == nil && !c.CloudOnly {
 		return fmt.Errorf("rollingUpdate is missing a k8s client")
 
@@ -161,7 +162,7 @@ func (c *RollingUpdateCluster) rollingUpdateInstanceGroup(ctx context.Context, c
 
 	for uIdx, u := range update {
 		go func(m *cloudinstances.CloudInstanceGroupMember) {
-			terminateChan <- c.drainTerminateAndWait(ctx, m, isBastion, sleepAfterTerminate)
+			terminateChan <- c.drainTerminateAndWait(ctx, m, sleepAfterTerminate)
 		}(u)
 		runningDrains++
 
@@ -320,7 +321,7 @@ func (c *RollingUpdateCluster) patchTaint(ctx context.Context, node *corev1.Node
 	return err
 }
 
-func (c *RollingUpdateCluster) drainTerminateAndWait(ctx context.Context, u *cloudinstances.CloudInstanceGroupMember, isBastion bool, sleepAfterTerminate time.Duration) error {
+func (c *RollingUpdateCluster) drainTerminateAndWait(ctx context.Context, u *cloudinstances.CloudInstanceGroupMember, sleepAfterTerminate time.Duration) error {
 	instanceID := u.ID
 
 	nodeName := ""
@@ -328,6 +329,8 @@ func (c *RollingUpdateCluster) drainTerminateAndWait(ctx context.Context, u *clo
 		nodeName = u.Node.Name
 	}
 
+	isBastion := u.CloudInstanceGroup.InstanceGroup.IsBastion()
+
 	if isBastion {
 		// We don't want to validate for bastions - they aren't part of the cluster
 	} else if c.CloudOnly {
@@ -561,3 +564,22 @@ func (c *RollingUpdateCluster) deleteNode(ctx context.Context, node *corev1.Node
 
 	return nil
 }
+
+// UpdateSingleInstance performs a rolling update on a single instance
+func (c *RollingUpdateCluster) UpdateSingleInstance(ctx context.Context, cloudMember *cloudinstances.CloudInstanceGroupMember, detach bool) error {
+	if detach {
+		if cloudMember.CloudInstanceGroup.InstanceGroup.IsMaster() {
+			klog.Warning("cannot detach master instances. Assuming --surge=false")
+
+		}
+		err := c.detachInstance(cloudMember)
+		if err != nil {
+			return fmt.Errorf("failed to detach instance: %v", err)
+		}
+		if err := c.maybeValidate(" after detaching instance", c.ValidateCount); err != nil {
+			return err
+		}
+	}
+
+	return c.drainTerminateAndWait(ctx, cloudMember, 0)
+}
diff --git a/pkg/instancegroups/rollingupdate.go b/pkg/instancegroups/rollingupdate.go
index 472f0914acde7..ce6bf0b152952 100644
--- a/pkg/instancegroups/rollingupdate.go
+++ b/pkg/instancegroups/rollingupdate.go
@@ -138,7 +138,7 @@ func (c *RollingUpdateCluster) RollingUpdate(ctx context.Context, groups map[str
 
 			defer wg.Done()
 
-			err := c.rollingUpdateInstanceGroup(ctx, cluster, bastionGroups[k], true, c.BastionInterval)
+			err := c.rollingUpdateInstanceGroup(ctx, cluster, bastionGroups[k], c.BastionInterval)
 
 			resultsMutex.Lock()
 			results[k] = err
@@ -163,7 +163,7 @@ func (c *RollingUpdateCluster) RollingUpdate(ctx context.Context, groups map[str
 	// and we don't want to roll all the masters at the same time.
See issue #284 for _, k := range sortGroups(masterGroups) { - err := c.rollingUpdateInstanceGroup(ctx, cluster, masterGroups[k], false, c.MasterInterval) + err := c.rollingUpdateInstanceGroup(ctx, cluster, masterGroups[k], c.MasterInterval) // Do not continue update if master(s) failed, cluster is potentially in an unhealthy state if err != nil { @@ -185,7 +185,7 @@ func (c *RollingUpdateCluster) RollingUpdate(ctx context.Context, groups map[str } for _, k := range sortGroups(nodeGroups) { - err := c.rollingUpdateInstanceGroup(ctx, cluster, nodeGroups[k], false, c.NodeInterval) + err := c.rollingUpdateInstanceGroup(ctx, cluster, nodeGroups[k], c.NodeInterval) results[k] = err diff --git a/upup/pkg/fi/cloudup/awsup/aws_cloud.go b/upup/pkg/fi/cloudup/awsup/aws_cloud.go index a160a2d5aedd6..8f756ee30c3ed 100644 --- a/upup/pkg/fi/cloudup/awsup/aws_cloud.go +++ b/upup/pkg/fi/cloudup/awsup/aws_cloud.go @@ -477,6 +477,9 @@ func deleteInstance(c AWSCloud, i *cloudinstances.CloudInstanceGroupMember) erro // DetachInstance causes an aws instance to no longer be counted against the ASG's size limits. func (c *awsCloudImplementation) DetachInstance(i *cloudinstances.CloudInstanceGroupMember) error { + if i.Detached { + return nil + } if c.spotinst != nil { return spotinst.DetachInstance(c.spotinst, i) }