Fast scale-up when an ASG faults #6729

Closed
25 changes: 22 additions & 3 deletions cluster-autoscaler/clusterstate/clusterstate.go
@@ -654,7 +654,7 @@ func (csr *ClusterStateRegistry) updateReadinessStats(currentTime time.Time) {
klog.Warningf("Failed to get maxNodeProvisionTime for node %s in node group %s: %v", unregistered.Node.Name, nodeGroup.Id(), err)
continue
}
-			if unregistered.UnregisteredSince.Add(maxNodeProvisionTime).Before(currentTime) {
+			if unregistered.UnregisteredSince.Add(maxNodeProvisionTime).Before(currentTime) || IsFakeNodeUnhealthy(unregistered.Node) {
perNgCopy.LongUnregistered = append(perNgCopy.LongUnregistered, unregistered.Node.Name)
total.LongUnregistered = append(total.LongUnregistered, unregistered.Node.Name)
} else {
@@ -1023,7 +1023,7 @@ func getNotRegisteredNodes(allNodes []*apiv1.Node, cloudProviderNodeInstances ma
for _, instance := range instances {
if !registered.Has(instance.Id) && expectedToRegister(instance) {
notRegistered = append(notRegistered, UnregisteredNode{
-				Node:              FakeNode(instance, cloudprovider.FakeNodeUnregistered),
+				Node:              FakeNode(instance, buildFakeNodeReason(instance)),
UnregisteredSince: time,
})
}
@@ -1033,7 +1033,18 @@
}

func expectedToRegister(instance cloudprovider.Instance) bool {
-	return instance.Status == nil || (instance.Status.State != cloudprovider.InstanceDeleting && instance.Status.ErrorInfo == nil)
+	return instance.Status == nil || instance.Status.State != cloudprovider.InstanceDeleting
}

+func buildFakeNodeReason(instance cloudprovider.Instance) string {
+	if instance.Status == nil {
+		return cloudprovider.FakeNodeUnregistered
+	}
+
+	if instance.Status.State == cloudprovider.InstanceCreating && instance.Status.ErrorInfo != nil {
+		return cloudprovider.FakeNodeCreateError
+	}
+	return cloudprovider.FakeNodeUnregistered
+}

// Calculates which of the registered nodes in Kubernetes that do not exist in cloud provider.
@@ -1241,6 +1252,14 @@ func FakeNode(instance cloudprovider.Instance, reason string) *apiv1.Node {
}
}

+// IsFakeNodeUnhealthy reports whether a fake node represents an instance
+// that failed to be created (create error).
+func IsFakeNodeUnhealthy(node *apiv1.Node) bool {
+	if reason, ok := node.Annotations[cloudprovider.FakeNodeReasonAnnotation]; ok && reason == cloudprovider.FakeNodeCreateError {
+		return true
+	}
+	return false
+}

// PeriodicCleanup performs clean-ups that should be done periodically, e.g.
// each Autoscaler loop.
func (csr *ClusterStateRegistry) PeriodicCleanup() {
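Net effect of the clusterstate changes: every placeholder ("fake") node records why it exists, and create-error placeholders are reported as long-unregistered immediately instead of only after `MaxNodeProvisionTime`. A minimal sketch of that flow; the constant values mirror what I believe the upstream `cloudprovider` package defines (`FakeNodeReasonAnnotation`, `FakeNodeUnregistered`, `FakeNodeCreateError`), and `fakeNode` is a simplified stand-in for the real `FakeNode` helper:

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Stand-ins for the cloudprovider constants this PR relies on (assumed values).
const (
	fakeNodeReasonAnnotation = "k8s.io/cluster-autoscaler/fake-node-reason"
	fakeNodeUnregistered     = "unregistered"
	fakeNodeCreateError      = "create-error"
)

// fakeNode mirrors what clusterstate.FakeNode does: build a placeholder
// Node for a cloud instance and record why it is fake in an annotation.
func fakeNode(instanceID, reason string) *apiv1.Node {
	return &apiv1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:        instanceID,
			Annotations: map[string]string{fakeNodeReasonAnnotation: reason},
		},
	}
}

// isFakeNodeUnhealthy matches the new helper: only create-error
// placeholders count as unhealthy.
func isFakeNodeUnhealthy(node *apiv1.Node) bool {
	return node.Annotations[fakeNodeReasonAnnotation] == fakeNodeCreateError
}

func main() {
	healthy := fakeNode("i-booting", fakeNodeUnregistered)
	broken := fakeNode("i-failed", fakeNodeCreateError)
	fmt.Println(isFakeNodeUnhealthy(healthy)) // false: still within provision time
	fmt.Println(isFakeNodeUnhealthy(broken))  // true: removed without waiting
}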
39 changes: 4 additions & 35 deletions cluster-autoscaler/core/static_autoscaler.go
@@ -448,11 +448,6 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
return nil
}

-	if deletedNodes := a.deleteCreatedNodesWithErrors(); deletedNodes {
-		klog.V(0).Infof("Some nodes that failed to create were removed, skipping iteration")
-		return nil
-	}

// Check if there has been a constant difference between the number of nodes in k8s and
// the number of nodes on the cloud provider side.
// TODO: andrewskim - add protection for ready AWS nodes.
@@ -764,6 +759,10 @@ func (a *StaticAutoscaler) removeOldUnregisteredNodes(allUnregisteredNodes []clu
klog.V(0).Infof("Marking unregistered node %v for removal", unregisteredNode.Node.Name)
nodesToDeleteByNodeGroupId[nodeGroup.Id()] = append(nodesToDeleteByNodeGroupId[nodeGroup.Id()], unregisteredNode)
}
+		if clusterstate.IsFakeNodeUnhealthy(unregisteredNode.Node) {
+			klog.V(0).Infof("Marking unregistered node %v for removal because it is unhealthy", unregisteredNode.Node.Name)
+			nodesToDeleteByNodeGroupId[nodeGroup.Id()] = append(nodesToDeleteByNodeGroupId[nodeGroup.Id()], unregisteredNode)
+		}
}

removedAny := false
Expand Down Expand Up @@ -821,36 +820,6 @@ func toNodes(unregisteredNodes []clusterstate.UnregisteredNode) []*apiv1.Node {
return nodes
}

-func (a *StaticAutoscaler) deleteCreatedNodesWithErrors() bool {
-	// We always schedule deleting of incoming errornous nodes
-	// TODO[lukaszos] Consider adding logic to not retry delete every loop iteration
-	nodeGroups := a.nodeGroupsById()
-	nodesToDeleteByNodeGroupId := a.clusterStateRegistry.GetCreatedNodesWithErrors()
-
-	deletedAny := false
-
-	for nodeGroupId, nodesToDelete := range nodesToDeleteByNodeGroupId {
-		var err error
-		klog.V(1).Infof("Deleting %v from %v node group because of create errors", len(nodesToDelete), nodeGroupId)
-
-		nodeGroup := nodeGroups[nodeGroupId]
-		if nodeGroup == nil {
-			err = fmt.Errorf("node group %s not found", nodeGroupId)
-		} else if nodesToDelete, err = overrideNodesToDeleteForZeroOrMax(a.NodeGroupDefaults, nodeGroup, nodesToDelete); err == nil {
-			err = nodeGroup.DeleteNodes(nodesToDelete)
-		}
-
-		if err != nil {
-			klog.Warningf("Error while trying to delete nodes from %v: %v", nodeGroupId, err)
-		} else {
-			deletedAny = true
-			a.clusterStateRegistry.InvalidateNodeInstancesCacheEntry(nodeGroup)
-		}
-	}
-
-	return deletedAny
-}

// overrideNodesToDeleteForZeroOrMax returns a list of nodes to delete, taking into account that
// node deletion for a "ZeroOrMaxNodeScaling" node group is atomic and should delete all nodes.
// For a non-"ZeroOrMaxNodeScaling" node group it returns the unchanged list of nodes to delete.
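For context on the atomic case the comment above describes (and which the nodeGroupAtomic expectations in the test below exercise): when a "ZeroOrMaxNodeScaling" group has any node marked for deletion, the whole group goes. A hedged sketch with a hypothetical simplified signature — the real `overrideNodesToDeleteForZeroOrMax` takes node-group options and works on `*apiv1.Node` slices queried from the cloud provider:

package main

import "fmt"

// overrideForAtomicGroup promotes a partial delete to the full group when
// the group scales atomically (all-or-nothing down to zero).
func overrideForAtomicGroup(atomic bool, groupNodes, toDelete []string) []string {
	if atomic && len(toDelete) > 0 && len(toDelete) < len(groupNodes) {
		return groupNodes // one broken node drags its healthy peers along
	}
	return toDelete
}

func main() {
	group := []string{"D1", "D2", "D3"}
	// Matches the nodeGroupAtomic expectations in the test below: deleting
	// one failed node from an atomic group deletes all three.
	fmt.Println(overrideForAtomicGroup(true, group, []string{"D2"}))
}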
88 changes: 57 additions & 31 deletions cluster-autoscaler/core/static_autoscaler_test.go
@@ -1578,7 +1578,7 @@ func TestStaticAutoscalerInstanceCreationErrors(t *testing.T) {
ScaleDownUnneededTime: time.Minute,
ScaleDownUnreadyTime: time.Minute,
ScaleDownUtilizationThreshold: 0.5,
-			MaxNodeProvisionTime: 10 * time.Second,
+			MaxNodeProvisionTime: 1 * time.Minute,
},
EstimatorName: estimator.BinpackingEstimatorName,
ScaleDownEnabled: true,
@@ -1587,24 +1587,6 @@ func TestStaticAutoscalerInstanceCreationErrors(t *testing.T) {
MaxMemoryTotal: 100000,
ExpendablePodsPriorityCutoff: 10,
}
-	processorCallbacks := newStaticAutoscalerProcessorCallbacks()
-
-	context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, processorCallbacks, nil)
-	assert.NoError(t, err)
-
-	clusterStateConfig := clusterstate.ClusterStateRegistryConfig{
-		OkTotalUnreadyCount: 1,
-	}
-
-	nodeGroupConfigProcessor := nodegroupconfig.NewDefaultNodeGroupConfigProcessor(options.NodeGroupDefaults)
-	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder, NewBackoff(), nodeGroupConfigProcessor)
-	autoscaler := &StaticAutoscaler{
-		AutoscalingContext:    &context,
-		clusterStateRegistry:  clusterState,
-		lastScaleUpTime:       time.Now(),
-		lastScaleDownFailTime: time.Now(),
-		processorCallbacks:    processorCallbacks,
-	}

nodeGroupA := &mockprovider.NodeGroup{}
nodeGroupB := &mockprovider.NodeGroup{}
@@ -1613,6 +1595,7 @@ func TestStaticAutoscalerInstanceCreationErrors(t *testing.T) {
nodeGroupA.On("Exist").Return(true)
nodeGroupA.On("Autoprovisioned").Return(false)
nodeGroupA.On("TargetSize").Return(5, nil)
nodeGroupA.On("MinSize").Return(1, nil)
nodeGroupA.On("Id").Return("A")
nodeGroupA.On("DeleteNodes", mock.Anything).Return(nil)
nodeGroupA.On("GetOptions", options.NodeGroupDefaults).Return(&options.NodeGroupDefaults, nil)
@@ -1674,6 +1657,7 @@ func TestStaticAutoscalerInstanceCreationErrors(t *testing.T) {
nodeGroupB.On("Exist").Return(true)
nodeGroupB.On("Autoprovisioned").Return(false)
nodeGroupB.On("TargetSize").Return(5, nil)
nodeGroupA.On("MinSize").Return(1, nil)
nodeGroupB.On("Id").Return("B")
nodeGroupB.On("DeleteNodes", mock.Anything).Return(nil)
nodeGroupB.On("GetOptions", options.NodeGroupDefaults).Return(&options.NodeGroupDefaults, nil)
@@ -1704,13 +1688,33 @@ func TestStaticAutoscalerInstanceCreationErrors(t *testing.T) {

now := time.Now()

fakeClient := &fake.Clientset{}
fakeLogRecorder, _ := clusterstate_utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")

context := &context.AutoscalingContext{
AutoscalingOptions: options,
CloudProvider: provider,
}
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
}, fakeLogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(context.AutoscalingOptions.NodeGroupDefaults))
clusterState.RefreshCloudProviderNodeInstancesCache()
// propagate nodes info in cluster state
clusterState.UpdateNodes([]*apiv1.Node{}, nil, now)

unregisteredNodes := clusterState.GetUnregisteredNodes()
assert.Equal(t, 6, len(unregisteredNodes))

autoscaler := &StaticAutoscaler{
AutoscalingContext: context,
clusterStateRegistry: clusterState,
}

// delete nodes with create errors
-	removedNodes := autoscaler.deleteCreatedNodesWithErrors()
+	removedNodes, err := autoscaler.removeOldUnregisteredNodes(unregisteredNodes, context, clusterState, now, fakeLogRecorder)
assert.True(t, removedNodes)
+	assert.NoError(t, err)

// check delete was called on correct nodes
nodeGroupA.AssertCalled(t, "DeleteNodes", mock.MatchedBy(
@@ -1734,8 +1738,11 @@ func TestStaticAutoscalerInstanceCreationErrors(t *testing.T) {
clusterState.UpdateNodes([]*apiv1.Node{}, nil, now)

// delete nodes with create errors
-	removedNodes = autoscaler.deleteCreatedNodesWithErrors()
+	unregisteredNodes = clusterState.GetUnregisteredNodes()
+	assert.Equal(t, 6, len(unregisteredNodes))
+	removedNodes, err = autoscaler.removeOldUnregisteredNodes(unregisteredNodes, context, clusterState, now, fakeLogRecorder)
assert.True(t, removedNodes)
+	assert.NoError(t, err)

// nodes should be deleted again
nodeGroupA.AssertCalled(t, "DeleteNodes", mock.MatchedBy(
@@ -1798,8 +1805,11 @@ func TestStaticAutoscalerInstanceCreationErrors(t *testing.T) {
clusterState.UpdateNodes([]*apiv1.Node{}, nil, now)

// delete nodes with create errors
-	removedNodes = autoscaler.deleteCreatedNodesWithErrors()
+	unregisteredNodes = clusterState.GetUnregisteredNodes()
+	assert.Equal(t, 2, len(unregisteredNodes))
+	removedNodes, err = autoscaler.removeOldUnregisteredNodes(unregisteredNodes, context, clusterState, now, fakeLogRecorder)
assert.False(t, removedNodes)
+	assert.NoError(t, err)

// we expect no more Delete Nodes
nodeGroupA.AssertNumberOfCalls(t, "DeleteNodes", 2)
@@ -1832,22 +1842,29 @@ func TestStaticAutoscalerInstanceCreationErrors(t *testing.T) {
return false
}, nil)

-	clusterState = clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder, NewBackoff(), nodeGroupConfigProcessor)
+	clusterState = clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{
+		MaxTotalUnreadyPercentage: 10,
+		OkTotalUnreadyCount:       1,
+	}, fakeLogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(context.AutoscalingOptions.NodeGroupDefaults))
clusterState.RefreshCloudProviderNodeInstancesCache()
autoscaler.clusterStateRegistry = clusterState

// update cluster state
clusterState.UpdateNodes([]*apiv1.Node{}, nil, time.Now())

// No nodes are deleted when failed nodes don't have matching node groups
removedNodes = autoscaler.deleteCreatedNodesWithErrors()
// return early on failed nodes without matching nodegroups
unregisteredNodes = clusterState.GetUnregisteredNodes()
assert.Equal(t, 1, len(unregisteredNodes))
removedNodes, err = autoscaler.removeOldUnregisteredNodes(unregisteredNodes, context, clusterState, now, fakeLogRecorder)
assert.False(t, removedNodes)
+	assert.NoError(t, err)
nodeGroupC.AssertNumberOfCalls(t, "DeleteNodes", 0)

nodeGroupAtomic := &mockprovider.NodeGroup{}
nodeGroupAtomic.On("Exist").Return(true)
nodeGroupAtomic.On("Autoprovisioned").Return(false)
nodeGroupAtomic.On("TargetSize").Return(3, nil)
nodeGroupAtomic.On("MinSize").Return(1, nil)
nodeGroupAtomic.On("Id").Return("D")
nodeGroupAtomic.On("DeleteNodes", mock.Anything).Return(nil)
nodeGroupAtomic.On("GetOptions", options.NodeGroupDefaults).Return(
@@ -1886,18 +1903,24 @@ func TestStaticAutoscalerInstanceCreationErrors(t *testing.T) {
return nodeGroupAtomic
}
return nil
-	}, nil).Times(3)
+	}, nil).Times(6)

-	clusterState = clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder, NewBackoff(), nodeGroupConfigProcessor)
+	clusterState = clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{
+		MaxTotalUnreadyPercentage: 10,
+		OkTotalUnreadyCount:       1,
+	}, fakeLogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(context.AutoscalingOptions.NodeGroupDefaults))
clusterState.RefreshCloudProviderNodeInstancesCache()
autoscaler.CloudProvider = provider
autoscaler.clusterStateRegistry = clusterState
// propagate nodes info in cluster state
clusterState.UpdateNodes([]*apiv1.Node{}, nil, now)

// delete nodes with create errors
-	removedNodes = autoscaler.deleteCreatedNodesWithErrors()
+	unregisteredNodes = clusterState.GetUnregisteredNodes()
+	assert.Equal(t, 3, len(unregisteredNodes))
+	removedNodes, err = autoscaler.removeOldUnregisteredNodes(unregisteredNodes, context, clusterState, now, fakeLogRecorder)
assert.True(t, removedNodes)
+	assert.NoError(t, err)

nodeGroupAtomic.AssertCalled(t, "DeleteNodes", mock.MatchedBy(
func(nodes []*apiv1.Node) bool {
@@ -1947,17 +1970,20 @@ func TestStaticAutoscalerInstanceCreationErrors(t *testing.T) {
return nodeGroupError
}
return nil
-	}, nil).Times(2)
+	}, nil).Times(5)

-	clusterState = clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder, NewBackoff(), nodeGroupConfigProcessor)
+	clusterState = clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{
+		MaxTotalUnreadyPercentage: 10,
+		OkTotalUnreadyCount:       1,
+	}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(context.AutoscalingOptions.NodeGroupDefaults))
clusterState.RefreshCloudProviderNodeInstancesCache()
autoscaler.CloudProvider = provider
autoscaler.clusterStateRegistry = clusterState
// propagate nodes info in cluster state
clusterState.UpdateNodes([]*apiv1.Node{}, nil, now)

// delete nodes with create errors
-	removedNodes = autoscaler.deleteCreatedNodesWithErrors()
+	removedNodes, err = autoscaler.removeOldUnregisteredNodes(unregisteredNodes, context, clusterState, now, fakeLogRecorder)
assert.False(t, removedNodes)

nodeGroupError.AssertNumberOfCalls(t, "DeleteNodes", 0)
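
The assertions above lean on `stretchr/testify` mocks throughout (`On`, `Return`, `mock.MatchedBy`, `AssertCalled`). For readers unfamiliar with that pattern, a minimal self-contained sketch; `fakeGroup` is a hypothetical stand-in, and the real test mocks the full `cloudprovider.NodeGroup` interface via `mockprovider.NodeGroup`:

package example

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

// fakeGroup shows the mock pattern the test uses for NodeGroup stubs.
type fakeGroup struct{ mock.Mock }

// DeleteNodes is simplified to []string; the real interface takes []*apiv1.Node.
func (g *fakeGroup) DeleteNodes(names []string) error {
	args := g.Called(names)
	return args.Error(0)
}

func TestDeletePattern(t *testing.T) {
	g := &fakeGroup{}
	g.On("DeleteNodes", mock.Anything).Return(nil)

	if err := g.DeleteNodes([]string{"A1"}); err != nil {
		t.Fatal(err)
	}
	// Assert the call happened with a matching argument, as the PR's test
	// does via mock.MatchedBy.
	g.AssertCalled(t, "DeleteNodes", mock.MatchedBy(
		func(names []string) bool { return len(names) == 1 && names[0] == "A1" }))
}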