Skip to content

Commit

Permalink
Updated the check for updating machinepool when using autoscaler
Browse files Browse the repository at this point in the history
  • Loading branch information
LochanRn committed Aug 21, 2022
1 parent fac7301 commit 8ecdb8b
Show file tree
Hide file tree
Showing 4 changed files with 65 additions and 11 deletions.
4 changes: 4 additions & 0 deletions azure/const.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,4 +28,8 @@ const (
// See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
// for annotation formatting rules.
RGTagsLastAppliedAnnotation = "sigs.k8s.io/cluster-api-provider-azure-last-applied-tags-rg"

// ReplicasManagedByAutoscalerAnnotation is set to true in the corresponding capi machine pool
// when an external autoscaler manages the node count of the associated machine pool.
ReplicasManagedByAutoscalerAnnotation = "cluster.x-k8s.io/replicas-managed-by-autoscaler"
)
47 changes: 36 additions & 11 deletions azure/scope/managedmachinepool.go
Original file line number Diff line number Diff line change
Expand Up @@ -69,21 +69,28 @@ func NewManagedMachinePoolScope(ctx context.Context, params ManagedMachinePoolSc
return nil, errors.Wrap(err, "failed to init patch helper")
}

capiMachinePoolPatchHelper, err := patch.NewHelper(params.MachinePool, params.Client)
if err != nil {
return nil, errors.Wrap(err, "failed to init patch helper")
}

return &ManagedMachinePoolScope{
Client: params.Client,
Cluster: params.Cluster,
ControlPlane: params.ControlPlane,
MachinePool: params.MachinePool,
InfraMachinePool: params.InfraMachinePool,
patchHelper: helper,
ManagedClusterScoper: params.ManagedControlPlaneScope,
Client: params.Client,
Cluster: params.Cluster,
ControlPlane: params.ControlPlane,
MachinePool: params.MachinePool,
InfraMachinePool: params.InfraMachinePool,
patchHelper: helper,
capiMachinePoolPatchHelper: capiMachinePoolPatchHelper,
ManagedClusterScoper: params.ManagedControlPlaneScope,
}, nil
}

// ManagedMachinePoolScope defines the basic context for an actuator to operate upon.
type ManagedMachinePoolScope struct {
Client client.Client
patchHelper *patch.Helper
Client client.Client
patchHelper *patch.Helper
capiMachinePoolPatchHelper *patch.Helper

azure.ManagedClusterScoper
Cluster *clusterv1.Cluster
Expand Down Expand Up @@ -231,7 +238,7 @@ func (s *ManagedMachinePoolScope) UpdateDeleteStatus(condition clusterv1.Conditi
}
}

// UpdatePutStatus updates a condition on the AzureManagedControlPlane status after a PUT operation.
// UpdatePutStatus updates a condition on the AzureManagedMachinePool status after a PUT operation.
func (s *ManagedMachinePoolScope) UpdatePutStatus(condition clusterv1.ConditionType, service string, err error) {
switch {
case err == nil:
Expand All @@ -243,7 +250,7 @@ func (s *ManagedMachinePoolScope) UpdatePutStatus(condition clusterv1.ConditionT
}
}

// UpdatePatchStatus updates a condition on the AzureManagedControlPlane status after a PATCH operation.
// UpdatePatchStatus updates a condition on the AzureManagedMachinePool status after a PATCH operation.
func (s *ManagedMachinePoolScope) UpdatePatchStatus(condition clusterv1.ConditionType, service string, err error) {
switch {
case err == nil:
Expand All @@ -254,3 +261,21 @@ func (s *ManagedMachinePoolScope) UpdatePatchStatus(condition clusterv1.Conditio
conditions.MarkFalse(s.InfraMachinePool, condition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s failed to update. err: %s", service, err.Error())
}
}

// PatchCAPIMachinePoolObject persists any pending changes to the CAPI
// MachinePool spec and status via the dedicated patch helper created in
// NewManagedMachinePoolScope.
func (s *ManagedMachinePoolScope) PatchCAPIMachinePoolObject(ctx context.Context) error {
	return s.capiMachinePoolPatchHelper.Patch(ctx, s.MachinePool)
}

// UpdateCAPIMachinePoolReplicas updates the associated MachinePool replica count.
// The change is only made in memory; it is persisted later by
// PatchCAPIMachinePoolObject. ctx is unused but kept to satisfy the
// agentpools.ManagedMachinePoolScope interface.
func (s *ManagedMachinePoolScope) UpdateCAPIMachinePoolReplicas(ctx context.Context, replicas *int32) {
	s.MachinePool.Spec.Replicas = replicas
}

// UpdateCAPIMachinePoolAnnotations sets the given annotation on the associated
// MachinePool. The change is only made in memory; it is persisted later by
// PatchCAPIMachinePoolObject. ctx is unused but kept to satisfy the
// agentpools.ManagedMachinePoolScope interface.
func (s *ManagedMachinePoolScope) UpdateCAPIMachinePoolAnnotations(ctx context.Context, key, value string) {
	// Assigning into a nil map panics; the MachinePool may have been created
	// without any annotations, so initialize the map on first use.
	if s.MachinePool.Annotations == nil {
		s.MachinePool.Annotations = make(map[string]string)
	}
	s.MachinePool.Annotations[key] = value
}
22 changes: 22 additions & 0 deletions azure/services/agentpools/agentpools.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import (
"time"

"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice"
"github.com/Azure/go-autorest/autorest/to"
"github.com/google/go-cmp/cmp"
"github.com/pkg/errors"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
Expand All @@ -43,6 +44,8 @@ type ManagedMachinePoolScope interface {
SetAgentPoolProviderIDList([]string)
SetAgentPoolReplicas(int32)
SetAgentPoolReady(bool)
UpdateCAPIMachinePoolReplicas(ctx context.Context, replicas *int32)
UpdateCAPIMachinePoolAnnotations(ctx context.Context, key, value string)
}

// Service provides operations on Azure resources.
Expand Down Expand Up @@ -127,6 +130,25 @@ func (s *Service) Reconcile(ctx context.Context) error {
},
}

// When autoscaling is set, the count of the nodes differ based on the autoscaler and should not depend on the
// count present in MachinePool or AzureManagedMachinePool, hence we should not make an update API call based
// on difference in count.
if to.Bool(profile.EnableAutoScaling) && existingProfile.Count != nil {
if to.Bool(profile.EnableAutoScaling) && !to.Bool(existingProfile.EnableAutoScaling) {
s.scope.UpdateCAPIMachinePoolAnnotations(ctx, azure.ReplicasManagedByAutoscalerAnnotation, "true")
}

if to.Int32(existingProfile.Count) != to.Int32(normalizedProfile.Count) {
s.scope.UpdateCAPIMachinePoolReplicas(ctx, existingProfile.Count)
}
normalizedProfile.Count = existingProfile.Count
}

// set ReplicasManagedByAutoscalerAnnotation to false as it is disabled by the user.
if !to.Bool(profile.EnableAutoScaling) && to.Bool(existingProfile.EnableAutoScaling) {
s.scope.UpdateCAPIMachinePoolAnnotations(ctx, azure.ReplicasManagedByAutoscalerAnnotation, "false")
}

// Diff and check if we require an update
diff := cmp.Diff(normalizedProfile, existingProfile)
if diff != "" {
Expand Down
3 changes: 3 additions & 0 deletions exp/controllers/azuremanagedmachinepool_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -225,6 +225,9 @@ func (ammpr *AzureManagedMachinePoolReconciler) Reconcile(ctx context.Context, r
if err := mcpScope.PatchObject(ctx); err != nil && reterr == nil {
reterr = err
}
if err := mcpScope.PatchCAPIMachinePoolObject(ctx); err != nil && reterr == nil {
reterr = err
}
}()

// Handle deleted clusters
Expand Down

0 comments on commit 8ecdb8b

Please sign in to comment.