From 43dd34074ef6ccc8a62481c21f376878c2671023 Mon Sep 17 00:00:00 2001
From: Eric Mrak and Brett Kochendorfer
Date: Wed, 17 Feb 2021 21:52:54 +0000
Subject: [PATCH 1/3] Allow name of cluster-autoscaler status ConfigMap to be specified

This allows us to run two instances of cluster-autoscaler in our cluster,
targeting two different types of autoscaling groups that require different
command-line settings to be passed.
---
 cluster-autoscaler/clusterstate/utils/status.go  | 16 +++++++---------
 cluster-autoscaler/config/autoscaling_options.go |  2 ++
 .../context/autoscaling_context.go               |  4 ++--
 cluster-autoscaler/core/scale_test_common.go     |  2 +-
 cluster-autoscaler/core/static_autoscaler.go     |  6 +++---
 cluster-autoscaler/main.go                       |  2 ++
 6 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/cluster-autoscaler/clusterstate/utils/status.go b/cluster-autoscaler/clusterstate/utils/status.go
index f7deb1ef22dc..3636abf72138 100644
--- a/cluster-autoscaler/clusterstate/utils/status.go
+++ b/cluster-autoscaler/clusterstate/utils/status.go
@@ -33,8 +33,6 @@ import (
 )
 
 const (
-	// StatusConfigMapName is the name of ConfigMap with status.
-	StatusConfigMapName = "cluster-autoscaler-status"
 	// ConfigMapLastUpdatedKey is the name of annotation informing about status ConfigMap last update.
 	ConfigMapLastUpdatedKey = "cluster-autoscaler.kubernetes.io/last-updated"
 	// ConfigMapLastUpdateFormat it the timestamp format used for last update annotation in status ConfigMap
@@ -65,11 +63,11 @@ func (ler *LogEventRecorder) Eventf(eventtype, reason, message string, args ...i
 // NewStatusMapRecorder creates a LogEventRecorder creating events on status configmap.
 // If the configmap doesn't exist it will be created (with 'Initializing' status).
 // If active == false the map will not be created and no events will be recorded.
-func NewStatusMapRecorder(kubeClient kube_client.Interface, namespace string, recorder record.EventRecorder, active bool) (*LogEventRecorder, error) {
+func NewStatusMapRecorder(kubeClient kube_client.Interface, namespace string, recorder record.EventRecorder, active bool, statusConfigMapName string) (*LogEventRecorder, error) {
 	var mapObj runtime.Object
 	var err error
 	if active {
-		mapObj, err = WriteStatusConfigMap(kubeClient, namespace, "Initializing", nil)
+		mapObj, err = WriteStatusConfigMap(kubeClient, namespace, "Initializing", nil, statusConfigMapName)
 		if err != nil {
 			return nil, errors.New("Failed to init status ConfigMap")
 		}
@@ -84,14 +82,14 @@ func NewStatusMapRecorder(kubeClient kube_client.Interface, namespace string, re
 // WriteStatusConfigMap writes updates status ConfigMap with a given message or creates a new
 // ConfigMap if it doesn't exist. If logRecorder is passed and configmap update is successful
 // logRecorder's internal reference will be updated.
-func WriteStatusConfigMap(kubeClient kube_client.Interface, namespace string, msg string, logRecorder *LogEventRecorder) (*apiv1.ConfigMap, error) {
+func WriteStatusConfigMap(kubeClient kube_client.Interface, namespace string, msg string, logRecorder *LogEventRecorder, statusConfigMapName string) (*apiv1.ConfigMap, error) {
 	statusUpdateTime := time.Now().Format(ConfigMapLastUpdateFormat)
 	statusMsg := fmt.Sprintf("Cluster-autoscaler status at %s:\n%v", statusUpdateTime, msg)
 	var configMap *apiv1.ConfigMap
 	var getStatusError, writeStatusError error
 	var errMsg string
 	maps := kubeClient.CoreV1().ConfigMaps(namespace)
-	configMap, getStatusError = maps.Get(context.TODO(), StatusConfigMapName, metav1.GetOptions{})
+	configMap, getStatusError = maps.Get(context.TODO(), statusConfigMapName, metav1.GetOptions{})
 	if getStatusError == nil {
 		configMap.Data["status"] = statusMsg
 		if configMap.ObjectMeta.Annotations == nil {
@@ -103,7 +101,7 @@ func WriteStatusConfigMap(kubeClient kube_client.Interface, namespace string, ms
 		configMap = &apiv1.ConfigMap{
 			ObjectMeta: metav1.ObjectMeta{
 				Namespace: namespace,
-				Name:      StatusConfigMapName,
+				Name:      statusConfigMapName,
 				Annotations: map[string]string{
 					ConfigMapLastUpdatedKey: statusUpdateTime,
 				},
@@ -133,9 +131,9 @@ func WriteStatusConfigMap(kubeClient kube_client.Interface, namespace string, ms
 }
 
 // DeleteStatusConfigMap deletes status configmap
-func DeleteStatusConfigMap(kubeClient kube_client.Interface, namespace string) error {
+func DeleteStatusConfigMap(kubeClient kube_client.Interface, namespace string, statusConfigMapName string) error {
 	maps := kubeClient.CoreV1().ConfigMaps(namespace)
-	err := maps.Delete(context.TODO(), StatusConfigMapName, metav1.DeleteOptions{})
+	err := maps.Delete(context.TODO(), statusConfigMapName, metav1.DeleteOptions{})
 	if err != nil {
 		klog.Error("Failed to delete status configmap")
 	}
diff --git a/cluster-autoscaler/config/autoscaling_options.go b/cluster-autoscaler/config/autoscaling_options.go
index 8e0effb817b9..cec3e0dda5e3 100644
--- a/cluster-autoscaler/config/autoscaling_options.go
+++ b/cluster-autoscaler/config/autoscaling_options.go
@@ -117,6 +117,8 @@ type AutoscalingOptions struct {
 	NodeDeletionDelayTimeout time.Duration
 	// WriteStatusConfigMap tells if the status information should be written to a ConfigMap
 	WriteStatusConfigMap bool
+	// StatusConfigMapName is the name of the ConfigMap the status information is written to
+	StatusConfigMapName string
 	// BalanceSimilarNodeGroups enables logic that identifies node groups with similar machines and tries to balance node count between them.
 	BalanceSimilarNodeGroups bool
 	// ConfigNamespace is the namespace cluster-autoscaler is running in and all related configmaps live in
diff --git a/cluster-autoscaler/context/autoscaling_context.go b/cluster-autoscaler/context/autoscaling_context.go
index 2dda9ee7613b..d26c8d81bf82 100644
--- a/cluster-autoscaler/context/autoscaling_context.go
+++ b/cluster-autoscaler/context/autoscaling_context.go
@@ -111,12 +111,12 @@ func NewAutoscalingKubeClients(opts config.AutoscalingOptions, kubeClient, event
 	listerRegistryStopChannel := make(chan struct{})
 	listerRegistry := kube_util.NewListerRegistryWithDefaultListers(kubeClient, listerRegistryStopChannel)
 	kubeEventRecorder := kube_util.CreateEventRecorder(eventsKubeClient)
-	logRecorder, err := utils.NewStatusMapRecorder(kubeClient, opts.ConfigNamespace, kubeEventRecorder, opts.WriteStatusConfigMap)
+	logRecorder, err := utils.NewStatusMapRecorder(kubeClient, opts.ConfigNamespace, kubeEventRecorder, opts.WriteStatusConfigMap, opts.StatusConfigMapName)
 	if err != nil {
 		klog.Error("Failed to initialize status configmap, unable to write status events")
 		// Get a dummy, so we can at least safely call the methods
 		// TODO(maciekpytel): recover from this after successful status configmap update?
-		logRecorder, _ = utils.NewStatusMapRecorder(eventsKubeClient, opts.ConfigNamespace, kubeEventRecorder, false)
+		logRecorder, _ = utils.NewStatusMapRecorder(eventsKubeClient, opts.ConfigNamespace, kubeEventRecorder, false, opts.StatusConfigMapName)
 	}
 
 	return &AutoscalingKubeClients{
diff --git a/cluster-autoscaler/core/scale_test_common.go b/cluster-autoscaler/core/scale_test_common.go
index f7cff0c8f731..c3be7c5a095c 100644
--- a/cluster-autoscaler/core/scale_test_common.go
+++ b/cluster-autoscaler/core/scale_test_common.go
@@ -153,7 +153,7 @@ func NewScaleTestAutoscalingContext(
 	// Not enough buffer space causes the test to hang without printing any logs.
 	// This is not useful.
 	fakeRecorder := kube_record.NewFakeRecorder(100)
-	fakeLogRecorder, err := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
+	fakeLogRecorder, err := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false, "my-cool-configmap")
 	if err != nil {
 		return context.AutoscalingContext{}, err
 	}
diff --git a/cluster-autoscaler/core/static_autoscaler.go b/cluster-autoscaler/core/static_autoscaler.go
index 9fc3016cae43..30c2b53e345a 100644
--- a/cluster-autoscaler/core/static_autoscaler.go
+++ b/cluster-autoscaler/core/static_autoscaler.go
@@ -294,7 +294,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError
 		if autoscalingContext.WriteStatusConfigMap {
 			status := a.clusterStateRegistry.GetStatus(currentTime)
 			utils.WriteStatusConfigMap(autoscalingContext.ClientSet, autoscalingContext.ConfigNamespace,
-				status.GetReadableString(), a.AutoscalingContext.LogRecorder)
+				status.GetReadableString(), a.AutoscalingContext.LogRecorder, a.AutoscalingContext.StatusConfigMapName)
 		}
 
 		// This deferred processor execution allows the processors to handle a situation when a scale-(up|down)
@@ -696,7 +696,7 @@ func (a *StaticAutoscaler) ExitCleanUp() {
 	if !a.AutoscalingContext.WriteStatusConfigMap {
 		return
 	}
-	utils.DeleteStatusConfigMap(a.AutoscalingContext.ClientSet, a.AutoscalingContext.ConfigNamespace)
+	utils.DeleteStatusConfigMap(a.AutoscalingContext.ClientSet, a.AutoscalingContext.ConfigNamespace, a.AutoscalingContext.StatusConfigMapName)
 
 	a.clusterStateRegistry.Stop()
 }
@@ -761,7 +761,7 @@ func (a *StaticAutoscaler) onEmptyCluster(status string, emitEvent bool) {
 	metrics.UpdateClusterSafeToAutoscale(false)
 	metrics.UpdateNodesCount(0, 0, 0, 0, 0)
 	if a.AutoscalingContext.WriteStatusConfigMap {
-		utils.WriteStatusConfigMap(a.AutoscalingContext.ClientSet, a.AutoscalingContext.ConfigNamespace, status, a.AutoscalingContext.LogRecorder)
+		utils.WriteStatusConfigMap(a.AutoscalingContext.ClientSet, a.AutoscalingContext.ConfigNamespace, status, a.AutoscalingContext.LogRecorder, a.AutoscalingContext.StatusConfigMapName)
 	}
 	if emitEvent {
 		a.AutoscalingContext.LogRecorder.Eventf(apiv1.EventTypeWarning, "ClusterUnhealthy", status)
diff --git a/cluster-autoscaler/main.go b/cluster-autoscaler/main.go
index a7360a527132..304bbedd6300 100644
--- a/cluster-autoscaler/main.go
+++ b/cluster-autoscaler/main.go
@@ -158,6 +158,7 @@ var (
 		"Should CA ignore Mirror pods when calculating resource utilization for scaling down")
 	writeStatusConfigMapFlag = flag.Bool("write-status-configmap", true,
 		"Should CA write status information to a configmap")
+	statusConfigMapName = flag.String("status-config-map-name", "cluster-autoscaler-status", "Status configmap name")
 	maxInactivityTimeFlag = flag.Duration("max-inactivity", 10*time.Minute, "Maximum time from last recorded autoscaler activity before automatic restart")
 	maxFailingTimeFlag = flag.Duration("max-failing-time", 15*time.Minute, "Maximum time from last recorded successful autoscaler run before automatic restart")
 	balanceSimilarNodeGroupsFlag = flag.Bool("balance-similar-node-groups", false, "Detect similar node groups and balance the number of nodes between them")
@@ -233,6 +234,7 @@ func createAutoscalingOptions() config.AutoscalingOptions {
 		ScaleDownCandidatesPoolRatio:    *scaleDownCandidatesPoolRatio,
 		ScaleDownCandidatesPoolMinCount: *scaleDownCandidatesPoolMinCount,
 		WriteStatusConfigMap:            *writeStatusConfigMapFlag,
+		StatusConfigMapName:             *statusConfigMapName,
 		BalanceSimilarNodeGroups:        *balanceSimilarNodeGroupsFlag,
 		ConfigNamespace:                 *namespace,
 		ClusterName:                     *clusterName,

From 8442ba8307ae2d6fdd8c0da6800a5706712f4770 Mon Sep 17 00:00:00 2001
From: Eric Mrak and Brett Kochendorfer
Date: Thu, 18 Feb 2021 17:21:32 +0000
Subject: [PATCH 2/3] Add status ConfigMap name argument to tests

---
 .../clusterstate/clusterstate_test.go        | 32 +++++++++----------
 .../clusterstate/utils/status_test.go        | 12 +++----
 .../core/static_autoscaler_test.go           |  4 +--
 3 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/cluster-autoscaler/clusterstate/clusterstate_test.go b/cluster-autoscaler/clusterstate/clusterstate_test.go
index 02cc844d0ec9..7e4035af44dd 100644
--- a/cluster-autoscaler/clusterstate/clusterstate_test.go
+++ b/cluster-autoscaler/clusterstate/clusterstate_test.go
@@ -52,7 +52,7 @@ func TestOKWithScaleUp(t *testing.T) {
 	assert.NotNil(t, provider)
 
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
@@ -94,7 +94,7 @@ func TestEmptyOK(t *testing.T) {
 	assert.NotNil(t, provider)
 
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
@@ -135,7 +135,7 @@ func TestOKOneUnreadyNode(t *testing.T) {
 	assert.NotNil(t, provider)
 
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
@@ -173,7 +173,7 @@ func TestNodeWithoutNodeGroupDontCrash(t *testing.T) {
 	provider.AddNode("no_ng", noNgNode)
 
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
@@ -200,7 +200,7 @@ func TestOKOneUnreadyNodeWithScaleDownCandidate(t *testing.T) {
 	assert.NotNil(t, provider)
 
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
@@ -265,7 +265,7 @@ func TestMissingNodes(t *testing.T) {
 	assert.NotNil(t, provider)
 
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
@@ -307,7 +307,7 @@ func TestTooManyUnready(t *testing.T) {
 	assert.NotNil(t, provider)
 
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
@@ -331,7 +331,7 @@ func TestExpiredScaleUp(t *testing.T) {
 	assert.NotNil(t, provider)
 
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
@@ -357,7 +357,7 @@ func TestRegisterScaleDown(t *testing.T) {
 	assert.NotNil(t, provider)
 
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
@@ -410,7 +410,7 @@ func TestUpcomingNodes(t *testing.T) {
 	assert.NotNil(t, provider)
 
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
@@ -433,7 +433,7 @@ func TestIncorrectSize(t *testing.T) {
 	provider.AddNode("ng1", ng1_1)
 	assert.NotNil(t, provider)
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
@@ -469,7 +469,7 @@ func TestUnregisteredNodes(t *testing.T) {
 	provider.AddNode("ng1", ng1_2)
 
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
@@ -621,7 +621,7 @@ func TestScaleUpBackoff(t *testing.T) {
 	assert.NotNil(t, provider)
 
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
@@ -689,7 +689,7 @@ func TestGetClusterSize(t *testing.T) {
 	provider.AddNode("notAutoscaledNode", notAutoscaledNode)
 
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
@@ -731,7 +731,7 @@ func TestUpdateScaleUp(t *testing.T) {
 	provider := testprovider.NewTestCloudProvider(nil, nil)
 	provider.AddNodeGroup("ng1", 1, 10, 5)
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterstate := NewClusterStateRegistry(
 		provider,
 		ClusterStateRegistryConfig{
@@ -828,7 +828,7 @@ func TestScaleUpFailures(t *testing.T) {
 	assert.NotNil(t, provider)
 
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{}, fakeLogRecorder, newBackoff())
 
 	clusterstate.RegisterFailedScaleUp(provider.GetNodeGroup("ng1"), metrics.Timeout, now)
diff --git a/cluster-autoscaler/clusterstate/utils/status_test.go b/cluster-autoscaler/clusterstate/utils/status_test.go
index 9295bf6ff030..a0dc5dab82bb 100644
--- a/cluster-autoscaler/clusterstate/utils/status_test.go
+++ b/cluster-autoscaler/clusterstate/utils/status_test.go
@@ -48,7 +48,7 @@ func setUpTest(t *testing.T) *testInfo {
 		configMap: &apiv1.ConfigMap{
 			ObjectMeta: metav1.ObjectMeta{
 				Namespace: namespace,
-				Name:      StatusConfigMapName,
+				Name:      "my-cool-configmap",
 			},
 			Data: map[string]string{},
 		},
@@ -61,7 +61,7 @@ func setUpTest(t *testing.T) *testInfo {
 	result.client.Fake.AddReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
 		get := action.(core.GetAction)
 		assert.Equal(result.t, namespace, get.GetNamespace())
-		assert.Equal(result.t, StatusConfigMapName, get.GetName())
+		assert.Equal(result.t, "my-cool-configmap", get.GetName())
 		result.getCalled = true
 		if result.getError != nil {
 			return true, nil, result.getError
@@ -78,7 +78,7 @@ func setUpTest(t *testing.T) *testInfo {
 		create := action.(core.CreateAction)
 		assert.Equal(result.t, namespace, create.GetNamespace())
 		configMap := create.GetObject().(*apiv1.ConfigMap)
-		assert.Equal(result.t, StatusConfigMapName, configMap.ObjectMeta.Name)
+		assert.Equal(result.t, "my-cool-configmap", configMap.ObjectMeta.Name)
 		result.createCalled = true
 		return true, configMap, nil
 	})
@@ -87,7 +87,7 @@ func setUpTest(t *testing.T) *testInfo {
 
 func TestWriteStatusConfigMapExisting(t *testing.T) {
 	ti := setUpTest(t)
-	result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil)
+	result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil, "my-cool-configmap")
 	assert.Equal(t, ti.configMap, result)
 	assert.Contains(t, result.Data["status"], "TEST_MSG")
 	assert.Contains(t, result.ObjectMeta.Annotations, ConfigMapLastUpdatedKey)
@@ -100,7 +100,7 @@ func TestWriteStatusConfigMapExisting(t *testing.T) {
 func TestWriteStatusConfigMapCreate(t *testing.T) {
 	ti := setUpTest(t)
 	ti.getError = kube_errors.NewNotFound(apiv1.Resource("configmap"), "nope, not found")
-	result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil)
+	result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil, "my-cool-configmap")
 	assert.Contains(t, result.Data["status"], "TEST_MSG")
 	assert.Contains(t, result.ObjectMeta.Annotations, ConfigMapLastUpdatedKey)
 	assert.Nil(t, err)
@@ -112,7 +112,7 @@ func TestWriteStatusConfigMapCreate(t *testing.T) {
 func TestWriteStatusConfigMapError(t *testing.T) {
 	ti := setUpTest(t)
 	ti.getError = errors.New("stuff bad")
-	result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil)
+	result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil, "my-cool-configmap")
 	assert.NotNil(t, err)
 	assert.Contains(t, err.Error(), "stuff bad")
 	assert.Nil(t, result)
diff --git a/cluster-autoscaler/core/static_autoscaler_test.go b/cluster-autoscaler/core/static_autoscaler_test.go
index 9502150b1eb9..d7b08bfbf848 100644
--- a/cluster-autoscaler/core/static_autoscaler_test.go
+++ b/cluster-autoscaler/core/static_autoscaler_test.go
@@ -1189,7 +1189,7 @@ func TestRemoveFixNodeTargetSize(t *testing.T) {
 	provider.AddNode("ng1", ng1_1)
 
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := clusterstate_utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := clusterstate_utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
@@ -1235,7 +1235,7 @@ func TestRemoveOldUnregisteredNodes(t *testing.T) {
 	provider.AddNode("ng1", ng1_2)
 
 	fakeClient := &fake.Clientset{}
-	fakeLogRecorder, _ := clusterstate_utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+	fakeLogRecorder, _ := clusterstate_utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,

From 2d73844af6e6dedcb9572fc8c51753444ebf9928 Mon Sep 17 00:00:00 2001
From: Eric Mrak and Brett Kochendorfer
Date: Wed, 3 Mar 2021 23:58:43 +0000
Subject: [PATCH 3/3] Update parameter list in FAQ

---
 cluster-autoscaler/FAQ.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/cluster-autoscaler/FAQ.md b/cluster-autoscaler/FAQ.md
index b338066b6f34..e502bd0d2737 100644
--- a/cluster-autoscaler/FAQ.md
+++ b/cluster-autoscaler/FAQ.md
@@ -664,6 +664,7 @@ The following startup parameters are supported for cluster autoscaler:
 | `estimator` | Type of resource estimator to be used in scale up | binpacking
 | `expander` | Type of node group expander to be used in scale up. | random
 | `write-status-configmap` | Should CA write status information to a configmap | true
+| `status-config-map-name` | The name of the status ConfigMap that CA writes | cluster-autoscaler-status
 | `max-inactivity` | Maximum time from last recorded autoscaler activity before automatic restart | 10 minutes
 | `max-failing-time` | Maximum time from last recorded successful autoscaler run before automatic restart | 15 minutes
 | `balance-similar-node-groups` | Detect similar node groups and balance the number of nodes between them | false
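
For illustration only, and not part of the patches above: a minimal sketch of how the new `--status-config-map-name` flag could be used to run two cluster-autoscaler instances side by side, each writing to its own status ConfigMap, which is the scenario described in the first commit message. The Deployment names, node-group names, and the `--cloud-provider`/`--expander` choices below are hypothetical; only the flags themselves come from the existing command line and this change. RBAC, resource requests, and other production settings are omitted.

```yaml
# Hypothetical sketch: two autoscaler Deployments in one cluster, each with a
# distinct status ConfigMap so their status writes do not collide.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cluster-autoscaler-on-demand        # illustrative name
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cluster-autoscaler-on-demand
  template:
    metadata:
      labels:
        app: cluster-autoscaler-on-demand
    spec:
      serviceAccountName: cluster-autoscaler
      containers:
      - name: cluster-autoscaler
        image: k8s.gcr.io/autoscaling/cluster-autoscaler:v1.20.0
        command:
        - ./cluster-autoscaler
        - --cloud-provider=aws
        - --nodes=1:10:on-demand-asg                                  # illustrative node group
        - --status-config-map-name=cluster-autoscaler-status-on-demand
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cluster-autoscaler-spot              # illustrative name
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cluster-autoscaler-spot
  template:
    metadata:
      labels:
        app: cluster-autoscaler-spot
    spec:
      serviceAccountName: cluster-autoscaler
      containers:
      - name: cluster-autoscaler
        image: k8s.gcr.io/autoscaling/cluster-autoscaler:v1.20.0
        command:
        - ./cluster-autoscaler
        - --cloud-provider=aws
        - --nodes=0:50:spot-asg                                       # illustrative node group
        - --expander=random                  # example of a per-instance setting that differs
        - --status-config-map-name=cluster-autoscaler-status-spot
```

Without the flag, both instances would default to `cluster-autoscaler-status` and overwrite each other's status in the shared namespace.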