Don't pass around isBastion when it can be inferred from other args
Ole Markus With committed Aug 24, 2020
1 parent 1ab5029 commit e205046
Showing 2 changed files with 10 additions and 7 deletions.
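
The change in a nutshell: a CloudInstanceGroup already knows whether it is a bastion via its InstanceGroup's IsBastion() method, so callers no longer need to thread an isBastion flag through rollingUpdateInstanceGroup and drainTerminateAndWait. Below is a minimal sketch of the pattern using simplified stand-in types (the real kops structs carry many more fields); it is an illustration of the refactor, not the project's actual code.

package main

import "fmt"

// Simplified stand-ins for kops' cloudinstances types (illustrative only).
type InstanceGroup struct{ Role string }

// IsBastion reports whether the group runs bastion hosts.
func (ig *InstanceGroup) IsBastion() bool { return ig.Role == "Bastion" }

type CloudInstanceGroup struct{ InstanceGroup *InstanceGroup }

// Before: the flag travels alongside the group, and a caller can pass a
// value that contradicts the group itself.
func rollingUpdateBefore(group *CloudInstanceGroup, isBastion bool) {
	if isBastion {
		fmt.Println("skipping cluster validation: bastions are not cluster members")
	}
}

// After: infer the flag from the group argument, shrinking the signature
// and removing the possibility of inconsistent call sites.
func rollingUpdateAfter(group *CloudInstanceGroup) {
	isBastion := group.InstanceGroup.IsBastion()
	if isBastion {
		fmt.Println("skipping cluster validation: bastions are not cluster members")
	}
}

func main() {
	g := &CloudInstanceGroup{InstanceGroup: &InstanceGroup{Role: "Bastion"}}
	rollingUpdateBefore(g, true) // caller must keep the flag in sync by hand
	rollingUpdateAfter(g)        // one fewer argument; flag derived internally
}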
11 changes: 7 additions & 4 deletions pkg/instancegroups/instancegroups.go
@@ -69,7 +69,8 @@ func promptInteractive(upgradedHostID, upgradedHostName string) (stopPrompting b
 }
 
 // RollingUpdate performs a rolling update on a list of instances.
-func (c *RollingUpdateCluster) rollingUpdateInstanceGroup(ctx context.Context, cluster *api.Cluster, group *cloudinstances.CloudInstanceGroup, isBastion bool, sleepAfterTerminate time.Duration) (err error) {
+func (c *RollingUpdateCluster) rollingUpdateInstanceGroup(ctx context.Context, cluster *api.Cluster, group *cloudinstances.CloudInstanceGroup, sleepAfterTerminate time.Duration) (err error) {
+	isBastion := group.InstanceGroup.IsBastion()
 	// Do not need a k8s client if you are doing cloudonly.
 	if c.K8sClient == nil && !c.CloudOnly {
 		return fmt.Errorf("rollingUpdate is missing a k8s client")
@@ -161,7 +162,7 @@ func (c *RollingUpdateCluster) rollingUpdateInstanceGroup(ctx context.Context, c
 
 	for uIdx, u := range update {
 		go func(m *cloudinstances.CloudInstanceGroupMember) {
-			terminateChan <- c.drainTerminateAndWait(ctx, m, isBastion, sleepAfterTerminate)
+			terminateChan <- c.drainTerminateAndWait(ctx, m, sleepAfterTerminate)
 		}(u)
 		runningDrains++

@@ -320,14 +321,16 @@ func (c *RollingUpdateCluster) patchTaint(ctx context.Context, node *corev1.Node
 	return err
 }
 
-func (c *RollingUpdateCluster) drainTerminateAndWait(ctx context.Context, u *cloudinstances.CloudInstanceGroupMember, isBastion bool, sleepAfterTerminate time.Duration) error {
+func (c *RollingUpdateCluster) drainTerminateAndWait(ctx context.Context, u *cloudinstances.CloudInstanceGroupMember, sleepAfterTerminate time.Duration) error {
 	instanceID := u.ID
 
 	nodeName := ""
 	if u.Node != nil {
 		nodeName = u.Node.Name
 	}
 
+	isBastion := u.CloudInstanceGroup.InstanceGroup.IsBastion()
+
 	if isBastion {
 		// We don't want to validate for bastions - they aren't part of the cluster
 	} else if c.CloudOnly {
@@ -578,5 +581,5 @@ func (c *RollingUpdateCluster) UpdateSingleInstance(ctx context.Context, cloudMe
 		}
 	}
 
-	return c.drainTerminateAndWait(ctx, cloudMember, false, 0)
+	return c.drainTerminateAndWait(ctx, cloudMember, 0)
 }
6 changes: 3 additions & 3 deletions pkg/instancegroups/rollingupdate.go
@@ -138,7 +138,7 @@ func (c *RollingUpdateCluster) RollingUpdate(ctx context.Context, groups map[str
 
			defer wg.Done()
 
-			err := c.rollingUpdateInstanceGroup(ctx, cluster, bastionGroups[k], true, c.BastionInterval)
+			err := c.rollingUpdateInstanceGroup(ctx, cluster, bastionGroups[k], c.BastionInterval)
 
			resultsMutex.Lock()
			results[k] = err
@@ -163,7 +163,7 @@ func (c *RollingUpdateCluster) RollingUpdate(ctx context.Context, groups map[str
 	// and we don't want to roll all the masters at the same time. See issue #284
 
 	for _, k := range sortGroups(masterGroups) {
-		err := c.rollingUpdateInstanceGroup(ctx, cluster, masterGroups[k], false, c.MasterInterval)
+		err := c.rollingUpdateInstanceGroup(ctx, cluster, masterGroups[k], c.MasterInterval)
 
 		// Do not continue update if master(s) failed, cluster is potentially in an unhealthy state
 		if err != nil {
@@ -185,7 +185,7 @@ func (c *RollingUpdateCluster) RollingUpdate(ctx context.Context, groups map[str
 	}
 
 	for _, k := range sortGroups(nodeGroups) {
-		err := c.rollingUpdateInstanceGroup(ctx, cluster, nodeGroups[k], false, c.NodeInterval)
+		err := c.rollingUpdateInstanceGroup(ctx, cluster, nodeGroups[k], c.NodeInterval)
 
		results[k] = err
