From 8ecdb8b936db180a3210b219c8b92e3975b2b6a6 Mon Sep 17 00:00:00 2001
From: LochanRn
Date: Sat, 2 Jul 2022 00:55:18 +0530
Subject: [PATCH] Updated the check for updating the MachinePool when using an
 autoscaler

---
 azure/const.go                                |  4 ++
 azure/scope/managedmachinepool.go             | 51 +++++++++++++++------
 azure/services/agentpools/agentpools.go       | 22 +++++++++
 .../azuremanagedmachinepool_controller.go     |  3 ++
 4 files changed, 69 insertions(+), 11 deletions(-)

diff --git a/azure/const.go b/azure/const.go
index 9c4ac179ab26..c45b0d1d4617 100644
--- a/azure/const.go
+++ b/azure/const.go
@@ -28,4 +28,8 @@ const (
 	// See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
 	// for annotation formatting rules.
 	RGTagsLastAppliedAnnotation = "sigs.k8s.io/cluster-api-provider-azure-last-applied-tags-rg"
+
+	// ReplicasManagedByAutoscalerAnnotation is set to true on the corresponding CAPI MachinePool
+	// when an external autoscaler manages the node count of the associated machine pool.
+	ReplicasManagedByAutoscalerAnnotation = "cluster.x-k8s.io/replicas-managed-by-autoscaler"
 )
diff --git a/azure/scope/managedmachinepool.go b/azure/scope/managedmachinepool.go
index 1d7b6e3fb660..41af164b85d0 100644
--- a/azure/scope/managedmachinepool.go
+++ b/azure/scope/managedmachinepool.go
@@ -69,21 +69,28 @@ func NewManagedMachinePoolScope(ctx context.Context, params ManagedMachinePoolSc
 		return nil, errors.Wrap(err, "failed to init patch helper")
 	}
 
+	capiMachinePoolPatchHelper, err := patch.NewHelper(params.MachinePool, params.Client)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to init CAPI MachinePool patch helper")
+	}
+
 	return &ManagedMachinePoolScope{
-		Client:               params.Client,
-		Cluster:              params.Cluster,
-		ControlPlane:         params.ControlPlane,
-		MachinePool:          params.MachinePool,
-		InfraMachinePool:     params.InfraMachinePool,
-		patchHelper:          helper,
-		ManagedClusterScoper: params.ManagedControlPlaneScope,
+		Client:                     params.Client,
+		Cluster:                    params.Cluster,
+		ControlPlane:               params.ControlPlane,
+		MachinePool:                params.MachinePool,
+		InfraMachinePool:           params.InfraMachinePool,
+		patchHelper:                helper,
+		capiMachinePoolPatchHelper: capiMachinePoolPatchHelper,
+		ManagedClusterScoper:       params.ManagedControlPlaneScope,
 	}, nil
 }
 
 // ManagedMachinePoolScope defines the basic context for an actuator to operate upon.
 type ManagedMachinePoolScope struct {
-	Client      client.Client
-	patchHelper *patch.Helper
+	Client                     client.Client
+	patchHelper                *patch.Helper
+	capiMachinePoolPatchHelper *patch.Helper
 
 	azure.ManagedClusterScoper
 
 	Cluster          *clusterv1.Cluster
@@ -231,7 +238,7 @@ func (s *ManagedMachinePoolScope) UpdateDeleteStatus(condition clusterv1.Conditi
 	}
 }
 
-// UpdatePutStatus updates a condition on the AzureManagedControlPlane status after a PUT operation.
+// UpdatePutStatus updates a condition on the AzureManagedMachinePool status after a PUT operation.
 func (s *ManagedMachinePoolScope) UpdatePutStatus(condition clusterv1.ConditionType, service string, err error) {
 	switch {
 	case err == nil:
@@ -243,7 +250,7 @@ func (s *ManagedMachinePoolScope) UpdatePutStatus(condition clusterv1.ConditionT
 	}
 }
 
-// UpdatePatchStatus updates a condition on the AzureManagedControlPlane status after a PATCH operation.
+// UpdatePatchStatus updates a condition on the AzureManagedMachinePool status after a PATCH operation.
 func (s *ManagedMachinePoolScope) UpdatePatchStatus(condition clusterv1.ConditionType, service string, err error) {
 	switch {
 	case err == nil:
@@ -254,3 +261,25 @@ func (s *ManagedMachinePoolScope) UpdatePatchStatus(conditio
 		conditions.MarkFalse(s.InfraMachinePool, condition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s failed to update. err: %s", service, err.Error())
 	}
 }
+
+// PatchCAPIMachinePoolObject persists the CAPI MachinePool configuration and status.
+func (s *ManagedMachinePoolScope) PatchCAPIMachinePoolObject(ctx context.Context) error {
+	return s.capiMachinePoolPatchHelper.Patch(
+		ctx,
+		s.MachinePool,
+	)
+}
+
+// UpdateCAPIMachinePoolReplicas updates the associated MachinePool replica count.
+func (s *ManagedMachinePoolScope) UpdateCAPIMachinePoolReplicas(ctx context.Context, replicas *int32) {
+	s.MachinePool.Spec.Replicas = replicas
+}
+
+// UpdateCAPIMachinePoolAnnotations updates an annotation on the associated MachinePool.
+func (s *ManagedMachinePoolScope) UpdateCAPIMachinePoolAnnotations(ctx context.Context, key, value string) {
+	// Guard against a nil annotations map to avoid a panic on assignment.
+	if s.MachinePool.Annotations == nil {
+		s.MachinePool.Annotations = make(map[string]string)
+	}
+	s.MachinePool.Annotations[key] = value
+}
diff --git a/azure/services/agentpools/agentpools.go b/azure/services/agentpools/agentpools.go
index ba2a9ceed966..1b2477709bb6 100644
--- a/azure/services/agentpools/agentpools.go
+++ b/azure/services/agentpools/agentpools.go
@@ -22,6 +22,7 @@ import (
 	"time"
 
 	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice"
+	"github.com/Azure/go-autorest/autorest/to"
 	"github.com/google/go-cmp/cmp"
 	"github.com/pkg/errors"
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
@@ -43,6 +44,8 @@ type ManagedMachinePoolScope interface {
 	SetAgentPoolProviderIDList([]string)
 	SetAgentPoolReplicas(int32)
 	SetAgentPoolReady(bool)
+	UpdateCAPIMachinePoolReplicas(ctx context.Context, replicas *int32)
+	UpdateCAPIMachinePoolAnnotations(ctx context.Context, key, value string)
 }
 
 // Service provides operations on Azure resources.
@@ -127,6 +130,25 @@ func (s *Service) Reconcile(ctx context.Context) error {
 		},
 	}
 
+	// When autoscaling is enabled, the node count is driven by the autoscaler and should not
+	// depend on the count in MachinePool or AzureManagedMachinePool, so a difference in count
+	// alone must not trigger an update API call; the observed count is mirrored back instead.
+	if to.Bool(profile.EnableAutoScaling) && existingProfile.Count != nil {
+		if !to.Bool(existingProfile.EnableAutoScaling) {
+			s.scope.UpdateCAPIMachinePoolAnnotations(ctx, azure.ReplicasManagedByAutoscalerAnnotation, "true")
+		}
+
+		if to.Int32(existingProfile.Count) != to.Int32(normalizedProfile.Count) {
+			s.scope.UpdateCAPIMachinePoolReplicas(ctx, existingProfile.Count)
+		}
+		normalizedProfile.Count = existingProfile.Count
+	}
+
+	// Set ReplicasManagedByAutoscalerAnnotation to false, as autoscaling has been disabled by the user.
+	if !to.Bool(profile.EnableAutoScaling) && to.Bool(existingProfile.EnableAutoScaling) {
+		s.scope.UpdateCAPIMachinePoolAnnotations(ctx, azure.ReplicasManagedByAutoscalerAnnotation, "false")
+	}
+
 	// Diff and check if we require an update
 	diff := cmp.Diff(normalizedProfile, existingProfile)
 	if diff != "" {
diff --git a/exp/controllers/azuremanagedmachinepool_controller.go b/exp/controllers/azuremanagedmachinepool_controller.go
index 7e2318677e54..32b6db1e2235 100644
--- a/exp/controllers/azuremanagedmachinepool_controller.go
+++ b/exp/controllers/azuremanagedmachinepool_controller.go
@@ -225,6 +225,9 @@ func (ammpr *AzureManagedMachinePoolReconciler) Reconcile(ctx context.Context, r
 		if err := mcpScope.PatchObject(ctx); err != nil && reterr == nil {
 			reterr = err
 		}
+		if err := mcpScope.PatchCAPIMachinePoolObject(ctx); err != nil && reterr == nil {
+			reterr = err
+		}
 	}()
 
 	// Handle deleted clusters
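
Below is a minimal, self-contained sketch (not part of the patch) of the decision the new agentpools block implements. reconcileCount, toBool, and toInt32 are hypothetical stand-ins for the reconcile logic and for go-autorest's to.Bool/to.Int32 helpers; the real code operates on the containerservice agent pool profiles shown in the diff above.

package main

import "fmt"

// toBool and toInt32 mirror the nil-safe pointer dereferences provided by
// go-autorest's "to" package: nil yields the zero value.
func toBool(b *bool) bool {
	if b == nil {
		return false
	}
	return *b
}

func toInt32(i *int32) int32 {
	if i == nil {
		return 0
	}
	return *i
}

// reconcileCount sketches the new check: when the desired profile enables
// autoscaling, the count observed in AKS wins over the desired count, so a
// count difference alone never triggers an update call. The returned
// annotation value ("true", "false", or "" for no change) models the updates
// to ReplicasManagedByAutoscalerAnnotation.
func reconcileCount(desiredAutoscale bool, existingAutoscale *bool, desiredCount, existingCount *int32) (*int32, string) {
	if desiredAutoscale && existingCount != nil {
		annotation := ""
		if !toBool(existingAutoscale) {
			// Autoscaling was just enabled: replicas are now externally managed.
			annotation = "true"
		}
		// Keep the autoscaler-driven count so the subsequent diff sees no
		// spurious drift between desired and existing profiles.
		return existingCount, annotation
	}
	if !desiredAutoscale && toBool(existingAutoscale) {
		// The user disabled autoscaling: the desired count applies again.
		return desiredCount, "false"
	}
	return desiredCount, ""
}

func main() {
	enabled := true
	desired, observed := int32(3), int32(5)
	count, annotation := reconcileCount(true, &enabled, &desired, &observed)
	// Prints "5" with an empty annotation: the autoscaler's observed count is
	// preserved and no annotation change is needed.
	fmt.Println(toInt32(count), annotation)
}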