From aa85d7da8408f2abded1be73f3b5fc97fd82ac4a Mon Sep 17 00:00:00 2001 From: Cecile Robert-Michon Date: Fri, 4 Mar 2022 16:14:58 -0800 Subject: [PATCH] Make managedcluster reconcile and delete async --- azure/interfaces.go | 2 + azure/mock_azure/azure_mock.go | 14 + azure/scope/managedcontrolplane.go | 40 +- azure/scope/managedcontrolplane_test.go | 7 +- azure/services/availabilitysets/spec.go | 5 + .../{azurebastion_spec.go => spec.go} | 5 + azure/services/disks/spec.go | 5 + azure/services/groups/spec.go | 5 + azure/services/inboundnatrules/spec.go | 5 + azure/services/loadbalancers/spec.go | 5 + azure/services/managedclusters/client.go | 152 +++++-- .../managedclusters/managedclusters.go | 392 ++-------------- .../managedclusters/managedclusters_test.go | 308 +++++-------- .../mock_managedclusters/client_mock.go | 73 +-- .../mock_managedclusters/doc.go | 2 +- .../managedclusters_mock.go | 113 +++-- azure/services/managedclusters/spec.go | 424 ++++++++++++++++++ azure/services/managedclusters/spec_test.go | 164 +++++++ azure/services/natgateways/spec.go | 5 + azure/services/networkinterfaces/spec.go | 5 + azure/services/routetables/spec.go | 5 + azure/services/securitygroups/spec.go | 5 + azure/services/subnets/spec.go | 5 + azure/services/virtualmachines/spec.go | 5 + azure/services/virtualnetworks/spec.go | 5 + azure/services/vnetpeerings/spec.go | 5 + azure/types.go | 231 +++------- 27 files changed, 1122 insertions(+), 870 deletions(-) rename azure/services/bastionhosts/{azurebastion_spec.go => spec.go} (95%) create mode 100644 azure/services/managedclusters/spec.go create mode 100644 azure/services/managedclusters/spec_test.go diff --git a/azure/interfaces.go b/azure/interfaces.go index bcf75522e3b2..dfca0d4af938 100644 --- a/azure/interfaces.go +++ b/azure/interfaces.go @@ -106,6 +106,8 @@ type ResourceSpecGetter interface { OwnerResourceName() string // ResourceGroupName returns the name of the resource group the resource is in. ResourceGroupName() string + // CustomHeaders returns the headers that should be added to Azure API calls. + CustomHeaders() map[string]string // Parameters takes the existing resource and returns the desired parameters of the resource. // If the resource does not exist, or we do not care about existing parameters to update the resource, existing should be nil. // If no update is needed on the resource, Parameters should return nil. diff --git a/azure/mock_azure/azure_mock.go b/azure/mock_azure/azure_mock.go index 0f1d092bf186..121dd2f267cf 100644 --- a/azure/mock_azure/azure_mock.go +++ b/azure/mock_azure/azure_mock.go @@ -1349,6 +1349,20 @@ func (m *MockResourceSpecGetter) EXPECT() *MockResourceSpecGetterMockRecorder { return m.recorder } +// CustomHeaders mocks base method. +func (m *MockResourceSpecGetter) CustomHeaders() map[string]string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CustomHeaders") + ret0, _ := ret[0].(map[string]string) + return ret0 +} + +// CustomHeaders indicates an expected call of CustomHeaders. +func (mr *MockResourceSpecGetterMockRecorder) CustomHeaders() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CustomHeaders", reflect.TypeOf((*MockResourceSpecGetter)(nil).CustomHeaders)) +} + // OwnerResourceName mocks base method. 
func (m *MockResourceSpecGetter) OwnerResourceName() string { m.ctrl.T.Helper() diff --git a/azure/scope/managedcontrolplane.go b/azure/scope/managedcontrolplane.go index 434628bc62b4..6b30951d440a 100644 --- a/azure/scope/managedcontrolplane.go +++ b/azure/scope/managedcontrolplane.go @@ -33,10 +33,12 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/groups" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/managedclusters" "sigs.k8s.io/cluster-api-provider-azure/azure/services/subnets" "sigs.k8s.io/cluster-api-provider-azure/azure/services/virtualnetworks" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/util/futures" + "sigs.k8s.io/cluster-api-provider-azure/util/maps" "sigs.k8s.io/cluster-api-provider-azure/util/tele" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" @@ -396,18 +398,26 @@ func (s *ManagedControlPlaneScope) ManagedClusterAnnotations() map[string]string } // ManagedClusterSpec returns the managed cluster spec. -func (s *ManagedControlPlaneScope) ManagedClusterSpec() (azure.ManagedClusterSpec, error) { +func (s *ManagedControlPlaneScope) ManagedClusterSpec(ctx context.Context) (azure.ResourceSpecGetter, error) { decodedSSHPublicKey, err := base64.StdEncoding.DecodeString(s.ControlPlane.Spec.SSHPublicKey) if err != nil { - return azure.ManagedClusterSpec{}, errors.Wrap(err, "failed to decode SSHPublicKey") + return &managedclusters.ManagedClusterSpec{}, errors.Wrap(err, "failed to decode SSHPublicKey") } - managedClusterSpec := azure.ManagedClusterSpec{ + // TODO: Are there any side effects if we do this on every reconcile even when it's an update? + agentPools, err := s.GetAllAgentPoolSpecs(ctx) + if err != nil { + return &managedclusters.ManagedClusterSpec{}, errors.Wrapf(err, "failed to get system agent pool specs for managed cluster %s", s.ClusterName()) + } + + managedClusterSpec := managedclusters.ManagedClusterSpec{ Name: s.ControlPlane.Name, - ResourceGroupName: s.ControlPlane.Spec.ResourceGroupName, + ResourceGroup: s.ControlPlane.Spec.ResourceGroupName, NodeResourceGroupName: s.ControlPlane.Spec.NodeResourceGroupName, + AgentPools: agentPools, Location: s.ControlPlane.Spec.Location, Tags: s.ControlPlane.Spec.AdditionalTags, + Headers: maps.FilterByKeyPrefix(s.ManagedClusterAnnotations(), azure.CustomHeaderPrefix), Version: strings.TrimPrefix(s.ControlPlane.Spec.Version, "v"), SSHPublicKey: string(decodedSSHPublicKey), DNSServiceIP: s.ControlPlane.Spec.DNSServiceIP, @@ -434,7 +444,7 @@ func (s *ManagedControlPlaneScope) ManagedClusterSpec() (azure.ManagedClusterSpe // A user may provide zero or one CIDR blocks. If they provide an empty array, // we ignore it and use the default. AKS doesn't support > 1 Service/Pod CIDR. if len(clusterNetwork.Services.CIDRBlocks) > 1 { - return azure.ManagedClusterSpec{}, errors.New("managed control planes only allow one service cidr") + return &managedclusters.ManagedClusterSpec{}, errors.New("managed control planes only allow one service cidr") } if len(clusterNetwork.Services.CIDRBlocks) == 1 { managedClusterSpec.ServiceCIDR = clusterNetwork.Services.CIDRBlocks[0] @@ -444,7 +454,7 @@ func (s *ManagedControlPlaneScope) ManagedClusterSpec() (azure.ManagedClusterSpe // A user may provide zero or one CIDR blocks. If they provide an empty array, // we ignore it and use the default. 
AKS doesn't support > 1 Service/Pod CIDR. if len(clusterNetwork.Pods.CIDRBlocks) > 1 { - return azure.ManagedClusterSpec{}, errors.New("managed control planes only allow one service cidr") + return &managedclusters.ManagedClusterSpec{}, errors.New("managed control planes only allow one service cidr") } if len(clusterNetwork.Pods.CIDRBlocks) == 1 { managedClusterSpec.PodCIDR = clusterNetwork.Pods.CIDRBlocks[0] @@ -454,20 +464,20 @@ func (s *ManagedControlPlaneScope) ManagedClusterSpec() (azure.ManagedClusterSpe if s.ControlPlane.Spec.DNSServiceIP != nil { if managedClusterSpec.ServiceCIDR == "" { - return azure.ManagedClusterSpec{}, fmt.Errorf(s.Cluster.Name + " cluster serviceCIDR must be specified if specifying DNSServiceIP") + return &managedclusters.ManagedClusterSpec{}, fmt.Errorf(s.Cluster.Name + " cluster serviceCIDR must be specified if specifying DNSServiceIP") } _, cidr, err := net.ParseCIDR(managedClusterSpec.ServiceCIDR) if err != nil { - return azure.ManagedClusterSpec{}, fmt.Errorf("failed to parse cluster service cidr: %w", err) + return &managedclusters.ManagedClusterSpec{}, fmt.Errorf("failed to parse cluster service cidr: %w", err) } ip := net.ParseIP(*s.ControlPlane.Spec.DNSServiceIP) if !cidr.Contains(ip) { - return azure.ManagedClusterSpec{}, fmt.Errorf(s.ControlPlane.Name + " DNSServiceIP must reside within the associated cluster serviceCIDR") + return &managedclusters.ManagedClusterSpec{}, fmt.Errorf(s.ControlPlane.Name + " DNSServiceIP must reside within the associated cluster serviceCIDR") } } if s.ControlPlane.Spec.AADProfile != nil { - managedClusterSpec.AADProfile = &azure.AADProfile{ + managedClusterSpec.AADProfile = &managedclusters.AADProfile{ Managed: s.ControlPlane.Spec.AADProfile.Managed, EnableAzureRBAC: s.ControlPlane.Spec.AADProfile.Managed, AdminGroupObjectIDs: s.ControlPlane.Spec.AADProfile.AdminGroupObjectIDs, @@ -476,7 +486,7 @@ func (s *ManagedControlPlaneScope) ManagedClusterSpec() (azure.ManagedClusterSpe if s.ControlPlane.Spec.AddonProfiles != nil { for _, profile := range s.ControlPlane.Spec.AddonProfiles { - managedClusterSpec.AddonProfiles = append(managedClusterSpec.AddonProfiles, azure.AddonProfile{ + managedClusterSpec.AddonProfiles = append(managedClusterSpec.AddonProfiles, managedclusters.AddonProfile{ Name: profile.Name, Enabled: profile.Enabled, Config: profile.Config, @@ -485,13 +495,13 @@ func (s *ManagedControlPlaneScope) ManagedClusterSpec() (azure.ManagedClusterSpe } if s.ControlPlane.Spec.SKU != nil { - managedClusterSpec.SKU = &azure.SKU{ + managedClusterSpec.SKU = &managedclusters.SKU{ Tier: string(s.ControlPlane.Spec.SKU.Tier), } } if s.ControlPlane.Spec.LoadBalancerProfile != nil { - managedClusterSpec.LoadBalancerProfile = &azure.LoadBalancerProfile{ + managedClusterSpec.LoadBalancerProfile = &managedclusters.LoadBalancerProfile{ ManagedOutboundIPs: s.ControlPlane.Spec.LoadBalancerProfile.ManagedOutboundIPs, OutboundIPPrefixes: s.ControlPlane.Spec.LoadBalancerProfile.OutboundIPPrefixes, OutboundIPs: s.ControlPlane.Spec.LoadBalancerProfile.OutboundIPs, @@ -501,7 +511,7 @@ func (s *ManagedControlPlaneScope) ManagedClusterSpec() (azure.ManagedClusterSpe } if s.ControlPlane.Spec.APIServerAccessProfile != nil { - managedClusterSpec.APIServerAccessProfile = &azure.APIServerAccessProfile{ + managedClusterSpec.APIServerAccessProfile = &managedclusters.APIServerAccessProfile{ AuthorizedIPRanges: s.ControlPlane.Spec.APIServerAccessProfile.AuthorizedIPRanges, EnablePrivateCluster: 
s.ControlPlane.Spec.APIServerAccessProfile.EnablePrivateCluster, PrivateDNSZone: s.ControlPlane.Spec.APIServerAccessProfile.PrivateDNSZone, @@ -509,7 +519,7 @@ func (s *ManagedControlPlaneScope) ManagedClusterSpec() (azure.ManagedClusterSpe } } - return managedClusterSpec, nil + return &managedClusterSpec, nil } // GetAllAgentPoolSpecs gets a slice of azure.AgentPoolSpec for the list of agent pools. diff --git a/azure/scope/managedcontrolplane_test.go b/azure/scope/managedcontrolplane_test.go index 80bf75e3f853..02632dd0e096 100644 --- a/azure/scope/managedcontrolplane_test.go +++ b/azure/scope/managedcontrolplane_test.go @@ -27,6 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/managedclusters" infrav1 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" capiv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1" @@ -678,7 +679,7 @@ func TestManagedControlPlaneScope_AddonProfiles(t *testing.T) { cases := []struct { Name string Input ManagedControlPlaneScopeParams - Expected azure.ManagedClusterSpec + Expected managedclusters.ManagedClusterSpec }{ { Name: "Without add-ons", @@ -705,7 +706,7 @@ func TestManagedControlPlaneScope_AddonProfiles(t *testing.T) { InfraMachinePool: getAzureMachinePool("pool0", infrav1.NodePoolModeSystem), PatchTarget: getAzureMachinePool("pool0", infrav1.NodePoolModeSystem), }, - Expected: azure.ManagedClusterSpec{ + Expected: managedclusters.ManagedClusterSpec{ Name: "cluster1", VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", }, @@ -739,7 +740,7 @@ func TestManagedControlPlaneScope_AddonProfiles(t *testing.T) { InfraMachinePool: getAzureMachinePool("pool0", infrav1.NodePoolModeSystem), PatchTarget: getAzureMachinePool("pool0", infrav1.NodePoolModeSystem), }, - Expected: azure.ManagedClusterSpec{ + Expected: managedclusters.ManagedClusterSpec{ Name: "cluster1", VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", AddonProfiles: []azure.AddonProfile{ diff --git a/azure/services/availabilitysets/spec.go b/azure/services/availabilitysets/spec.go index a21319911016..d18afe507eee 100644 --- a/azure/services/availabilitysets/spec.go +++ b/azure/services/availabilitysets/spec.go @@ -52,6 +52,11 @@ func (s *AvailabilitySetSpec) OwnerResourceName() string { return "" } +// CustomHeaders is not implemented for availability sets. +func (s *AvailabilitySetSpec) CustomHeaders() map[string]string { + return nil +} + // Parameters returns the parameters for the availability set. func (s *AvailabilitySetSpec) Parameters(existing interface{}) (params interface{}, err error) { if existing != nil { diff --git a/azure/services/bastionhosts/azurebastion_spec.go b/azure/services/bastionhosts/spec.go similarity index 95% rename from azure/services/bastionhosts/azurebastion_spec.go rename to azure/services/bastionhosts/spec.go index b70dda169f7b..5b6c269b336b 100644 --- a/azure/services/bastionhosts/azurebastion_spec.go +++ b/azure/services/bastionhosts/spec.go @@ -59,6 +59,11 @@ func (s *AzureBastionSpec) OwnerResourceName() string { return "" } +// CustomHeaders is not implemented for bastion hosts. 
+func (s *AzureBastionSpec) CustomHeaders() map[string]string { + return nil +} + // Parameters returns the parameters for the bastion host. func (s *AzureBastionSpec) Parameters(existing interface{}) (paramteres interface{}, err error) { if existing != nil { diff --git a/azure/services/disks/spec.go b/azure/services/disks/spec.go index 6dd9cd8ee461..dd2f2dc94c88 100644 --- a/azure/services/disks/spec.go +++ b/azure/services/disks/spec.go @@ -37,6 +37,11 @@ func (s *DiskSpec) OwnerResourceName() string { return "" } +// CustomHeaders is not implemented for disks. +func (s *DiskSpec) CustomHeaders() map[string]string { + return nil +} + // Parameters is a no-op for disks. func (s *DiskSpec) Parameters(existing interface{}) (params interface{}, err error) { return nil, nil diff --git a/azure/services/groups/spec.go b/azure/services/groups/spec.go index bc8bcc686d4c..9bf065b3d01d 100644 --- a/azure/services/groups/spec.go +++ b/azure/services/groups/spec.go @@ -47,6 +47,11 @@ func (s *GroupSpec) OwnerResourceName() string { return "" // not applicable } +// CustomHeaders is not implemented for groups. +func (s *GroupSpec) CustomHeaders() map[string]string { + return nil +} + // Parameters returns the parameters for the group. func (s *GroupSpec) Parameters(existing interface{}) (params interface{}, err error) { if existing != nil { diff --git a/azure/services/inboundnatrules/spec.go b/azure/services/inboundnatrules/spec.go index f300b99b9267..8e32f8c31e7b 100644 --- a/azure/services/inboundnatrules/spec.go +++ b/azure/services/inboundnatrules/spec.go @@ -46,6 +46,11 @@ func (s *InboundNatSpec) OwnerResourceName() string { return s.LoadBalancerName } +// CustomHeaders is not implemented for inbound NAT rules. +func (s *InboundNatSpec) CustomHeaders() map[string]string { + return nil +} + // Parameters returns the parameters for the inbound NAT rule. func (s *InboundNatSpec) Parameters(existing interface{}) (parameters interface{}, err error) { if existing != nil { diff --git a/azure/services/loadbalancers/spec.go b/azure/services/loadbalancers/spec.go index 3948b56c0ee6..f2860df18246 100644 --- a/azure/services/loadbalancers/spec.go +++ b/azure/services/loadbalancers/spec.go @@ -60,6 +60,11 @@ func (s *LBSpec) OwnerResourceName() string { return "" } +// CustomHeaders is not implemented for load balancers. +func (s *LBSpec) CustomHeaders() map[string]string { + return nil +} + // Parameters returns the parameters for the load balancer. func (s *LBSpec) Parameters(existing interface{}) (parameters interface{}, err error) { var ( diff --git a/azure/services/managedclusters/client.go b/azure/services/managedclusters/client.go index 29ac9e537e58..0b61b23bb93e 100644 --- a/azure/services/managedclusters/client.go +++ b/azure/services/managedclusters/client.go @@ -18,32 +18,31 @@ package managedclusters import ( "context" + "encoding/json" "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice" "github.com/Azure/go-autorest/autorest" + azureautorest "github.com/Azure/go-autorest/autorest/azure" "github.com/pkg/errors" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) -// Client wraps go-sdk. -type Client interface { - Get(context.Context, string, string) (containerservice.ManagedCluster, error) +// CredentialGetter is a helper interface for getting managed cluster credentials. 
+type CredentialGetter interface { GetCredentials(context.Context, string, string) ([]byte, error) - CreateOrUpdate(context.Context, string, string, containerservice.ManagedCluster, map[string]string) (containerservice.ManagedCluster, error) - Delete(context.Context, string, string) error } -// AzureClient contains the Azure go-sdk Client. -type AzureClient struct { +// azureClient contains the Azure go-sdk Client. +type azureClient struct { managedclusters containerservice.ManagedClustersClient } -var _ Client = &AzureClient{} - -// NewClient creates a new VM client from subscription ID. -func NewClient(auth azure.Authorizer) *AzureClient { - return &AzureClient{ +// newClient creates a new managed cluster client from an authorizer. +func newClient(auth azure.Authorizer) *azureClient { + return &azureClient{ managedclusters: newManagedClustersClient(auth.SubscriptionID(), auth.BaseURI(), auth.Authorizer()), } } @@ -56,13 +55,16 @@ func newManagedClustersClient(subscriptionID string, baseURI string, authorizer } // Get gets a managed cluster. -func (ac *AzureClient) Get(ctx context.Context, resourceGroupName, name string) (containerservice.ManagedCluster, error) { - return ac.managedclusters.Get(ctx, resourceGroupName, name) +func (ac *azureClient) Get(ctx context.Context, spec azure.ResourceSpecGetter) (result interface{}, err error) { + ctx, _, done := tele.StartSpanWithLogger(ctx, "managedclusters.azureClient.Get") + defer done() + + return ac.managedclusters.Get(ctx, spec.ResourceGroupName(), spec.ResourceName()) } // GetCredentials fetches the admin kubeconfig for a managed cluster. -func (ac *AzureClient) GetCredentials(ctx context.Context, resourceGroupName, name string) ([]byte, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "managedclusters.AzureClient.GetCredentials") +func (ac *azureClient) GetCredentials(ctx context.Context, resourceGroupName, name string) ([]byte, error) { + ctx, _, done := tele.StartSpanWithLogger(ctx, "managedclusters.azureClient.GetCredentials") defer done() credentialList, err := ac.managedclusters.ListClusterAdminCredentials(ctx, resourceGroupName, name, "") @@ -77,45 +79,115 @@ func (ac *AzureClient) GetCredentials(ctx context.Context, resourceGroupName, na return *(*credentialList.Kubeconfigs)[0].Value, nil } -// CreateOrUpdate creates or updates a managed cluster. -func (ac *AzureClient) CreateOrUpdate(ctx context.Context, resourceGroupName, name string, cluster containerservice.ManagedCluster, headers map[string]string) (containerservice.ManagedCluster, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "managedclusters.AzureClient.CreateOrUpdate") +// CreateOrUpdateAsync creates or updates a managed cluster. +// It sends a PUT request to Azure and if accepted without error, the func will return a Future which can be used to track the ongoing +// progress of the operation. 
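+// If the operation does not complete within the default timeout, the Future is returned alongside the error so the caller can resume polling on a later reconcile.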
+func (ac *azureClient) CreateOrUpdateAsync(ctx context.Context, spec azure.ResourceSpecGetter, parameters interface{}) (result interface{}, future azureautorest.FutureAPI, err error) { + ctx, _, done := tele.StartSpanWithLogger(ctx, "managedclusters.azureClient.CreateOrUpdate") defer done() - preparer, err := ac.managedclusters.CreateOrUpdatePreparer(ctx, resourceGroupName, name, cluster) + managedcluster, ok := parameters.(containerservice.ManagedCluster) + if !ok { + return nil, nil, errors.Errorf("%T is not a containerservice.ManagedCluster", parameters) + } + + preparer, err := ac.managedclusters.CreateOrUpdatePreparer(ctx, spec.ResourceGroupName(), spec.ResourceName(), managedcluster) if err != nil { - return containerservice.ManagedCluster{}, errors.Wrap(err, "failed to prepare operation") + return nil, nil, errors.Wrap(err, "failed to prepare operation") } - for key, value := range headers { + + for key, value := range spec.CustomHeaders() { preparer.Header.Add(key, value) } - future, err := ac.managedclusters.CreateOrUpdateSender(preparer) + createFuture, err := ac.managedclusters.CreateOrUpdateSender(preparer) if err != nil { - return containerservice.ManagedCluster{}, errors.Wrap(err, "failed to begin operation") + return nil, nil, err } - if err := future.WaitForCompletionRef(ctx, ac.managedclusters.Client); err != nil { - return containerservice.ManagedCluster{}, errors.Wrap(err, "failed to end operation") + + ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultAzureCallTimeout) + defer cancel() + + err = createFuture.WaitForCompletionRef(ctx, ac.managedclusters.Client) + if err != nil { + // if an error occurs, return the future. + // this means the long-running operation didn't finish in the specified timeout. + return nil, &createFuture, err } - managedCluster, err := future.Result(ac.managedclusters) - return managedCluster, err + + result, err = createFuture.Result(ac.managedclusters) + // if the operation completed, return a nil future + return result, nil, err } -// Delete deletes a managed cluster. -func (ac *AzureClient) Delete(ctx context.Context, resourceGroupName, name string) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "managedclusters.AzureClient.Delete") +// DeleteAsync deletes a managed cluster asynchronously. DeleteAsync sends a DELETE +// request to Azure and if accepted without error, the func will return a Future which can be used to track the ongoing +// progress of the operation. +func (ac *azureClient) DeleteAsync(ctx context.Context, spec azure.ResourceSpecGetter) (future azureautorest.FutureAPI, err error) { + ctx, _, done := tele.StartSpanWithLogger(ctx, "managedclusters.azureClient.DeleteAsync") defer done() - future, err := ac.managedclusters.Delete(ctx, resourceGroupName, name) + deleteFuture, err := ac.managedclusters.Delete(ctx, spec.ResourceGroupName(), spec.ResourceName()) if err != nil { - if azure.ResourceGroupNotFound(err) || azure.ResourceNotFound(err) { - return nil - } - return errors.Wrap(err, "failed to begin operation") + return nil, err + } + + ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultAzureCallTimeout) + defer cancel() + + err = deleteFuture.WaitForCompletionRef(ctx, ac.managedclusters.Client) + if err != nil { + // if an error occurs, return the future. + // this means the long-running operation didn't finish in the specified timeout. 
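+ // the caller is expected to save this future (as an infrav1.Future in Status) and resume polling it on the next reconcile.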
+ return &deleteFuture, err
 }
+ _, err = deleteFuture.Result(ac.managedclusters)
+ // if the operation completed, return a nil future.
+ return nil, err
+}
+
+// IsDone returns true if the long-running operation has completed.
+func (ac *azureClient) IsDone(ctx context.Context, future azureautorest.FutureAPI) (bool, error) {
+ ctx, _, done := tele.StartSpanWithLogger(ctx, "managedclusters.azureClient.IsDone")
+ defer done()
+
+ isDone, err := future.DoneWithContext(ctx, ac.managedclusters)
+ if err != nil {
+ return false, errors.Wrap(err, "failed checking if the operation was complete")
+ }
+
+ return isDone, nil
+}
+
+// Result fetches the result of a long-running operation future.
+func (ac *azureClient) Result(ctx context.Context, future azureautorest.FutureAPI, futureType string) (result interface{}, err error) {
+ _, _, done := tele.StartSpanWithLogger(ctx, "managedclusters.azureClient.Result")
+ defer done()
+
+ if future == nil {
+ return nil, errors.Errorf("cannot get result from nil future")
+ }
+
+ switch futureType {
+ case infrav1.PutFuture:
+ // Marshal and Unmarshal the future to put it into the correct future type so we can access the Result function.
+ // Unfortunately the FutureAPI can't be cast directly to ManagedClustersCreateOrUpdateFuture because it is an azureautorest.Future, which doesn't implement the Result function. See PR #1686 for discussion on alternatives.
+ // It was converted back to a generic azureautorest.Future from the CAPZ infrav1.Future type stored in Status: https://github.com/kubernetes-sigs/cluster-api-provider-azure/blob/main/azure/converters/futures.go#L49.
+ var createFuture *containerservice.ManagedClustersCreateOrUpdateFuture
+ jsonData, err := future.MarshalJSON()
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to marshal future")
+ }
+ if err := json.Unmarshal(jsonData, &createFuture); err != nil {
+ return nil, errors.Wrap(err, "failed to unmarshal future data")
+ }
+ return createFuture.Result(ac.managedclusters)
+
+ case infrav1.DeleteFuture:
+ // Delete does not return a resulting managed cluster.
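+ // a nil result with a nil error signals that the delete operation completed.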
+ return nil, nil + + default: + return nil, errors.Errorf("unknown future type %q", futureType) } - _, err = future.Result(ac.managedclusters) - return err } diff --git a/azure/services/managedclusters/managedclusters.go b/azure/services/managedclusters/managedclusters.go index 7978e7c5a254..d4ff664f716e 100644 --- a/azure/services/managedclusters/managedclusters.go +++ b/azure/services/managedclusters/managedclusters.go @@ -18,36 +18,25 @@ package managedclusters import ( "context" - "fmt" - "net" - "time" "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice" - "github.com/Azure/go-autorest/autorest/to" - "github.com/google/go-cmp/cmp" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - "k8s.io/klog/v2" - infrav1alpha4 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" - "sigs.k8s.io/cluster-api-provider-azure/util/maps" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/async" + "sigs.k8s.io/cluster-api-provider-azure/util/reconciler" "sigs.k8s.io/cluster-api-provider-azure/util/tele" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) -const serviceName = "managedclusters" - -var ( - defaultUser = "azureuser" - managedIdentity = "msi" -) +const serviceName = "managedcluster" // ManagedClusterScope defines the scope interface for a managed cluster. type ManagedClusterScope interface { azure.ClusterDescriber - ManagedClusterAnnotations() map[string]string - ManagedClusterSpec() (azure.ManagedClusterSpec, error) - GetAllAgentPoolSpecs(ctx context.Context) ([]azure.AgentPoolSpec, error) + azure.AsyncStatusUpdater + ManagedClusterSpec(context.Context) (azure.ResourceSpecGetter, error) SetControlPlaneEndpoint(clusterv1.APIEndpoint) MakeEmptyKubeConfigSecret() corev1.Secret GetKubeConfigData() []byte @@ -57,117 +46,17 @@ type ManagedClusterScope interface { // Service provides operations on azure resources. type Service struct { Scope ManagedClusterScope - Client -} - -func convertToResourceReferences(resources []string) *[]containerservice.ResourceReference { - resourceReferences := make([]containerservice.ResourceReference, len(resources)) - for i := range resources { - resourceReferences[i] = containerservice.ResourceReference{ID: &resources[i]} - } - return &resourceReferences -} - -func computeDiffOfNormalizedClusters(managedCluster containerservice.ManagedCluster, existingMC containerservice.ManagedCluster) string { - // Normalize properties for the desired (CR spec) and existing managed - // cluster, so that we check only those fields that were specified in - // the initial CreateOrUpdate request and that can be modified. - // Without comparing to normalized properties, we would always get a - // difference in desired and existing, which would result in sending - // unnecessary Azure API requests. 
- propertiesNormalized := &containerservice.ManagedClusterProperties{ - KubernetesVersion: managedCluster.ManagedClusterProperties.KubernetesVersion, - NetworkProfile: &containerservice.NetworkProfile{}, - } - - existingMCPropertiesNormalized := &containerservice.ManagedClusterProperties{ - KubernetesVersion: existingMC.ManagedClusterProperties.KubernetesVersion, - NetworkProfile: &containerservice.NetworkProfile{}, - } - - if managedCluster.AadProfile != nil { - propertiesNormalized.AadProfile = &containerservice.ManagedClusterAADProfile{ - Managed: managedCluster.AadProfile.Managed, - EnableAzureRBAC: managedCluster.AadProfile.EnableAzureRBAC, - AdminGroupObjectIDs: managedCluster.AadProfile.AdminGroupObjectIDs, - } - } - - if existingMC.AadProfile != nil { - existingMCPropertiesNormalized.AadProfile = &containerservice.ManagedClusterAADProfile{ - Managed: existingMC.AadProfile.Managed, - EnableAzureRBAC: existingMC.AadProfile.EnableAzureRBAC, - AdminGroupObjectIDs: existingMC.AadProfile.AdminGroupObjectIDs, - } - } - - if managedCluster.AddonProfiles != nil { - for k, v := range managedCluster.AddonProfiles { - if propertiesNormalized.AddonProfiles == nil { - propertiesNormalized.AddonProfiles = map[string]*containerservice.ManagedClusterAddonProfile{} - } - propertiesNormalized.AddonProfiles[k] = &containerservice.ManagedClusterAddonProfile{ - Enabled: v.Enabled, - Config: v.Config, - } - } - } - - if existingMC.AddonProfiles != nil { - for k, v := range existingMC.AddonProfiles { - if existingMCPropertiesNormalized.AddonProfiles == nil { - existingMCPropertiesNormalized.AddonProfiles = map[string]*containerservice.ManagedClusterAddonProfile{} - } - existingMCPropertiesNormalized.AddonProfiles[k] = &containerservice.ManagedClusterAddonProfile{ - Enabled: v.Enabled, - Config: v.Config, - } - } - } - - if managedCluster.NetworkProfile != nil { - propertiesNormalized.NetworkProfile.LoadBalancerProfile = managedCluster.NetworkProfile.LoadBalancerProfile - } - - if existingMC.NetworkProfile != nil { - existingMCPropertiesNormalized.NetworkProfile.LoadBalancerProfile = existingMC.NetworkProfile.LoadBalancerProfile - } - - if managedCluster.APIServerAccessProfile != nil { - propertiesNormalized.APIServerAccessProfile = &containerservice.ManagedClusterAPIServerAccessProfile{ - AuthorizedIPRanges: managedCluster.APIServerAccessProfile.AuthorizedIPRanges, - } - } - - if existingMC.APIServerAccessProfile != nil { - existingMCPropertiesNormalized.APIServerAccessProfile = &containerservice.ManagedClusterAPIServerAccessProfile{ - AuthorizedIPRanges: existingMC.APIServerAccessProfile.AuthorizedIPRanges, - } - } - - clusterNormalized := &containerservice.ManagedCluster{ - ManagedClusterProperties: propertiesNormalized, - } - existingMCClusterNormalized := &containerservice.ManagedCluster{ - ManagedClusterProperties: existingMCPropertiesNormalized, - } - - if managedCluster.Sku != nil { - clusterNormalized.Sku = managedCluster.Sku - } - if existingMC.Sku != nil { - existingMCClusterNormalized.Sku = existingMC.Sku - } - - diff := cmp.Diff(clusterNormalized, existingMCClusterNormalized) - return diff + async.Reconciler + CredentialGetter } // New creates a new service. 
func New(scope ManagedClusterScope) *Service { + client := newClient(scope) return &Service{ - Scope: scope, - Client: NewClient(scope), + Scope: scope, + Reconciler: async.New(scope, client, client), + CredentialGetter: client, } } @@ -181,230 +70,41 @@ func (s *Service) Reconcile(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "managedclusters.Service.Reconcile") defer done() - managedClusterSpec, err := s.Scope.ManagedClusterSpec() + ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultAzureServiceReconcileTimeout) + defer cancel() + + managedClusterSpec, err := s.Scope.ManagedClusterSpec(ctx) if err != nil { return errors.Wrap(err, "failed to get managed cluster spec") + } else if managedClusterSpec == nil { + return nil } - isCreate := false - existingMC, err := s.Client.Get(ctx, managedClusterSpec.ResourceGroupName, managedClusterSpec.Name) - // Transient or other failure not due to 404 - if err != nil && !azure.ResourceNotFound(err) { - return azure.WithTransientError(errors.Wrap(err, "failed to fetch existing managed cluster"), 20*time.Second) - } - - // We are creating this cluster for the first time. - // Configure the agent pool, rest will be handled by machinepool controller - // We do this here because AKS will only let us mutate agent pools via managed - // clusters API at create time, not update. - if azure.ResourceNotFound(err) { - isCreate = true - // Add system agent pool to cluster spec that will be submitted to the API - managedClusterSpec.AgentPools, err = s.Scope.GetAllAgentPoolSpecs(ctx) - if err != nil { - return errors.Wrapf(err, "failed to get system agent pool specs for managed cluster %s", s.Scope.ClusterName()) - } - } - - managedCluster := containerservice.ManagedCluster{ - Identity: &containerservice.ManagedClusterIdentity{ - Type: containerservice.ResourceIdentityTypeSystemAssigned, - }, - Location: &managedClusterSpec.Location, - Tags: *to.StringMapPtr(managedClusterSpec.Tags), - ManagedClusterProperties: &containerservice.ManagedClusterProperties{ - NodeResourceGroup: &managedClusterSpec.NodeResourceGroupName, - EnableRBAC: to.BoolPtr(true), - DNSPrefix: &managedClusterSpec.Name, - KubernetesVersion: &managedClusterSpec.Version, - LinuxProfile: &containerservice.LinuxProfile{ - AdminUsername: &defaultUser, - SSH: &containerservice.SSHConfiguration{ - PublicKeys: &[]containerservice.SSHPublicKey{ - { - KeyData: &managedClusterSpec.SSHPublicKey, - }, - }, - }, - }, - ServicePrincipalProfile: &containerservice.ManagedClusterServicePrincipalProfile{ - ClientID: &managedIdentity, - }, - AgentPoolProfiles: &[]containerservice.ManagedClusterAgentPoolProfile{}, - NetworkProfile: &containerservice.NetworkProfile{ - NetworkPlugin: containerservice.NetworkPlugin(managedClusterSpec.NetworkPlugin), - LoadBalancerSku: containerservice.LoadBalancerSku(managedClusterSpec.LoadBalancerSKU), - NetworkPolicy: containerservice.NetworkPolicy(managedClusterSpec.NetworkPolicy), - }, - }, - } - - if managedClusterSpec.PodCIDR != "" { - managedCluster.NetworkProfile.PodCidr = &managedClusterSpec.PodCIDR - } - - if managedClusterSpec.ServiceCIDR != "" { - if managedClusterSpec.DNSServiceIP == nil { - managedCluster.NetworkProfile.ServiceCidr = &managedClusterSpec.ServiceCIDR - ip, _, err := net.ParseCIDR(managedClusterSpec.ServiceCIDR) - if err != nil { - return fmt.Errorf("failed to parse service cidr: %w", err) - } - // HACK: set the last octet of the IP to .10 - // This ensures the dns IP is valid in the service cidr without forcing the user - // 
to specify it in both the Capi cluster and the Azure control plane. - // https://golang.org/src/net/ip.go#L48 - ip[15] = byte(10) - dnsIP := ip.String() - managedCluster.NetworkProfile.DNSServiceIP = &dnsIP - } else { - managedCluster.NetworkProfile.DNSServiceIP = managedClusterSpec.DNSServiceIP + result, resultErr := s.CreateResource(ctx, managedClusterSpec, serviceName) + if resultErr == nil { + managedCluster, ok := result.(containerservice.ManagedCluster) + if !ok { + return errors.Errorf("%T is not a containerservice.ManagedCluster", result) } - } - - for i := range managedClusterSpec.AgentPools { - pool := managedClusterSpec.AgentPools[i] - profile := containerservice.ManagedClusterAgentPoolProfile{ - Name: &pool.Name, - VMSize: &pool.SKU, - OsDiskSizeGB: &pool.OSDiskSizeGB, - Count: &pool.Replicas, - Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, - VnetSubnetID: &managedClusterSpec.VnetSubnetID, - Mode: containerservice.AgentPoolMode(pool.Mode), - AvailabilityZones: &pool.AvailabilityZones, - MaxPods: pool.MaxPods, - OrchestratorVersion: pool.Version, - OsDiskType: containerservice.OSDiskType(to.String(pool.OsDiskType)), - } - *managedCluster.AgentPoolProfiles = append(*managedCluster.AgentPoolProfiles, profile) - } - - if managedClusterSpec.AADProfile != nil { - managedCluster.AadProfile = &containerservice.ManagedClusterAADProfile{ - Managed: &managedClusterSpec.AADProfile.Managed, - EnableAzureRBAC: &managedClusterSpec.AADProfile.EnableAzureRBAC, - AdminGroupObjectIDs: &managedClusterSpec.AADProfile.AdminGroupObjectIDs, - } - } - - handleAddonProfiles(managedCluster, managedClusterSpec) - - if managedClusterSpec.SKU != nil { - tierName := containerservice.ManagedClusterSKUTier(managedClusterSpec.SKU.Tier) - managedCluster.Sku = &containerservice.ManagedClusterSKU{ - Name: containerservice.ManagedClusterSKUNameBasic, - Tier: tierName, - } - } - - if managedClusterSpec.LoadBalancerProfile != nil { - managedCluster.NetworkProfile.LoadBalancerProfile = &containerservice.ManagedClusterLoadBalancerProfile{ - AllocatedOutboundPorts: managedClusterSpec.LoadBalancerProfile.AllocatedOutboundPorts, - IdleTimeoutInMinutes: managedClusterSpec.LoadBalancerProfile.IdleTimeoutInMinutes, - } - if managedClusterSpec.LoadBalancerProfile.ManagedOutboundIPs != nil { - managedCluster.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{Count: managedClusterSpec.LoadBalancerProfile.ManagedOutboundIPs} - } - if len(managedClusterSpec.LoadBalancerProfile.OutboundIPPrefixes) > 0 { - managedCluster.NetworkProfile.LoadBalancerProfile.OutboundIPPrefixes = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPPrefixes{ - PublicIPPrefixes: convertToResourceReferences(managedClusterSpec.LoadBalancerProfile.OutboundIPPrefixes), - } - } - if len(managedClusterSpec.LoadBalancerProfile.OutboundIPs) > 0 { - managedCluster.NetworkProfile.LoadBalancerProfile.OutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPs{ - PublicIPs: convertToResourceReferences(managedClusterSpec.LoadBalancerProfile.OutboundIPs), + // Update control plane endpoint. 
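+ // The FQDN is not populated for every cluster (a private cluster, for example, exposes a private FQDN instead), so the endpoint is only set when one is available.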
+ if managedCluster.ManagedClusterProperties != nil && managedCluster.ManagedClusterProperties.Fqdn != nil { + endpoint := clusterv1.APIEndpoint{ + Host: *managedCluster.ManagedClusterProperties.Fqdn, + Port: 443, } + s.Scope.SetControlPlaneEndpoint(endpoint) } - } - if managedClusterSpec.APIServerAccessProfile != nil { - managedCluster.APIServerAccessProfile = &containerservice.ManagedClusterAPIServerAccessProfile{ - AuthorizedIPRanges: &managedClusterSpec.APIServerAccessProfile.AuthorizedIPRanges, - EnablePrivateCluster: managedClusterSpec.APIServerAccessProfile.EnablePrivateCluster, - PrivateDNSZone: managedClusterSpec.APIServerAccessProfile.PrivateDNSZone, - EnablePrivateClusterPublicFQDN: managedClusterSpec.APIServerAccessProfile.EnablePrivateClusterPublicFQDN, - } - } - - customHeaders := maps.FilterByKeyPrefix(s.Scope.ManagedClusterAnnotations(), azure.CustomHeaderPrefix) - // Use the MC fetched from Azure if no update is needed. This is to ensure the read-only fields like Fqdn from the - // existing MC are used for updating the AzureManagedCluster. - result := existingMC - if isCreate { - result, err = s.Client.CreateOrUpdate(ctx, managedClusterSpec.ResourceGroupName, managedClusterSpec.Name, managedCluster, customHeaders) + // Update kubeconfig data + // Always fetch credentials in case of rotation + kubeConfigData, err := s.GetCredentials(ctx, s.Scope.ResourceGroup(), s.Scope.ClusterName()) if err != nil { - return fmt.Errorf("failed to create managed cluster, %w", err) - } - } else { - ps := *existingMC.ManagedClusterProperties.ProvisioningState - if ps != string(infrav1alpha4.Canceled) && ps != string(infrav1alpha4.Failed) && ps != string(infrav1alpha4.Succeeded) { - msg := fmt.Sprintf("Unable to update existing managed cluster in non terminal state. Managed cluster must be in one of the following provisioning states: canceled, failed, or succeeded. Actual state: %s", ps) - klog.V(2).Infof(msg) - return azure.WithTransientError(errors.New(msg), 20*time.Second) - } - - // Normalize the LoadBalancerProfile so the diff below doesn't get thrown off by AKS added properties. - if managedCluster.NetworkProfile.LoadBalancerProfile == nil { - // If our LoadBalancerProfile generated by the spec is nil, then don't worry about what AKS has added. - existingMC.NetworkProfile.LoadBalancerProfile = nil - } else { - // If our LoadBalancerProfile generated by the spec is not nil, then remove the effective outbound IPs from - // AKS. - existingMC.NetworkProfile.LoadBalancerProfile.EffectiveOutboundIPs = nil - } - - // Avoid changing agent pool profiles through AMCP and just use the existing agent pool profiles - // AgentPool changes are managed through AMMP - managedCluster.AgentPoolProfiles = existingMC.AgentPoolProfiles - - diff := computeDiffOfNormalizedClusters(managedCluster, existingMC) - if diff != "" { - klog.V(2).Infof("Update required (+new -old):\n%s", diff) - result, err = s.Client.CreateOrUpdate(ctx, managedClusterSpec.ResourceGroupName, managedClusterSpec.Name, managedCluster, customHeaders) - if err != nil { - return fmt.Errorf("failed to update managed cluster, %w", err) - } - } - } - - // Update control plane endpoint. - if result.ManagedClusterProperties != nil && result.ManagedClusterProperties.Fqdn != nil { - endpoint := clusterv1.APIEndpoint{ - Host: *result.ManagedClusterProperties.Fqdn, - Port: 443, - } - s.Scope.SetControlPlaneEndpoint(endpoint) - } else { - // Fail if cluster api endpoint is not available. 
- return fmt.Errorf("failed to get API endpoint for managed cluster") - } - - // Update kubeconfig data - // Always fetch credentials in case of rotation - kubeConfigData, err := s.Client.GetCredentials(ctx, s.Scope.ResourceGroup(), s.Scope.ClusterName()) - if err != nil { - return errors.Wrap(err, "failed to get credentials for managed cluster") - } - s.Scope.SetKubeConfigData(kubeConfigData) - - return nil -} - -func handleAddonProfiles(managedCluster containerservice.ManagedCluster, spec azure.ManagedClusterSpec) { - for i := range spec.AddonProfiles { - if managedCluster.AddonProfiles == nil { - managedCluster.AddonProfiles = map[string]*containerservice.ManagedClusterAddonProfile{} - } - item := spec.AddonProfiles[i] - addonProfile := &containerservice.ManagedClusterAddonProfile{ - Enabled: &item.Enabled, - } - if item.Config != nil { - addonProfile.Config = *to.StringMapPtr(item.Config) + return errors.Wrap(err, "failed to get credentials for managed cluster") } - managedCluster.AddonProfiles[item.Name] = addonProfile + s.Scope.SetKubeConfigData(kubeConfigData) } + s.Scope.UpdatePutStatus(infrav1.ManagedClusterRunningCondition, serviceName, resultErr) + return resultErr } // Delete deletes the managed cluster. @@ -412,18 +112,20 @@ func (s *Service) Delete(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "managedclusters.Service.Delete") defer done() - klog.V(2).Infof("Deleting managed cluster %s ", s.Scope.ClusterName()) - err := s.Client.Delete(ctx, s.Scope.ResourceGroup(), s.Scope.ClusterName()) + ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultAzureServiceReconcileTimeout) + defer cancel() + + // TODO (cecile): get rid of error for delete + managedClusterSpec, err := s.Scope.ManagedClusterSpec(ctx) if err != nil { - if azure.ResourceNotFound(err) { - // already deleted - return nil - } - return errors.Wrapf(err, "failed to delete managed cluster %s in resource group %s", s.Scope.ClusterName(), s.Scope.ResourceGroup()) + return errors.Wrap(err, "failed to get managed cluster spec") + } else if managedClusterSpec == nil { + return nil } - klog.V(2).Infof("successfully deleted managed cluster %s ", s.Scope.ClusterName()) - return nil + err = s.DeleteResource(ctx, managedClusterSpec, serviceName) + s.Scope.UpdateDeleteStatus(infrav1.ManagedClusterRunningCondition, serviceName, err) + return err } // IsManaged returns always returns true as CAPZ does not support BYO managed cluster. diff --git a/azure/services/managedclusters/managedclusters_test.go b/azure/services/managedclusters/managedclusters_test.go index 058aae3076c7..b93139170ad7 100644 --- a/azure/services/managedclusters/managedclusters_test.go +++ b/azure/services/managedclusters/managedclusters_test.go @@ -18,257 +18,112 @@ package managedclusters import ( "context" - "net/http" + "errors" "testing" "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice" - "github.com/Azure/go-autorest/autorest" "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" "k8s.io/utils/pointer" - "sigs.k8s.io/cluster-api-provider-azure/azure" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/async/mock_async" "sigs.k8s.io/cluster-api-provider-azure/azure/services/managedclusters/mock_managedclusters" gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) -func TestReconcile(t *testing.T) { - provisioningstatetestcases := []struct { - name string - provisioningStatesToTest []string - expectedError string - expect func(m *mock_managedclusters.MockClientMockRecorder, provisioningstate string, s *mock_managedclusters.MockManagedClusterScopeMockRecorder) - }{ - { - name: "managedcluster in terminal provisioning state", - provisioningStatesToTest: []string{"Canceled", "Succeeded", "Failed"}, - expectedError: "", - expect: func(m *mock_managedclusters.MockClientMockRecorder, provisioningstate string, s *mock_managedclusters.MockManagedClusterScopeMockRecorder) { - m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-managedcluster", gomock.Any(), map[string]string{"myFeature": "true"}).Return(containerservice.ManagedCluster{ManagedClusterProperties: &containerservice.ManagedClusterProperties{ - Fqdn: pointer.String("my-managedcluster-fqdn"), - ProvisioningState: &provisioningstate, - }}, nil) - m.Get(gomockinternal.AContext(), "my-rg", "my-managedcluster").Return(containerservice.ManagedCluster{ManagedClusterProperties: &containerservice.ManagedClusterProperties{ - Fqdn: pointer.String("my-managedcluster-fqdn"), - ProvisioningState: &provisioningstate, - NetworkProfile: &containerservice.NetworkProfile{}, - }}, nil) - m.GetCredentials(gomockinternal.AContext(), "my-rg", "my-managedcluster").Times(1) - s.ClusterName().AnyTimes().Return("my-managedcluster") - s.ResourceGroup().AnyTimes().Return("my-rg") - s.ManagedClusterAnnotations().Times(1).Return(map[string]string{ - "infrastructure.cluster.x-k8s.io/custom-header-myFeature": "true", - }) - s.ManagedClusterSpec().AnyTimes().Return(azure.ManagedClusterSpec{ - Name: "my-managedcluster", - ResourceGroupName: "my-rg", - }, nil) - s.SetControlPlaneEndpoint(gomock.Any()).Times(1) - s.SetKubeConfigData(gomock.Any()).Times(1) - }, - }, - { - name: "managedcluster in nonterminal provisioning state", - provisioningStatesToTest: []string{"Deleting", "InProgress", "randomStringHere"}, - expectedError: "Unable to update existing managed cluster in non terminal state. Managed cluster must be in one of the following provisioning states: canceled, failed, or succeeded. 
Actual state", - expect: func(m *mock_managedclusters.MockClientMockRecorder, provisioningstate string, s *mock_managedclusters.MockManagedClusterScopeMockRecorder) { - m.Get(gomockinternal.AContext(), "my-rg", "my-managedcluster").Return(containerservice.ManagedCluster{ManagedClusterProperties: &containerservice.ManagedClusterProperties{ - ProvisioningState: &provisioningstate, - }}, nil) - s.ClusterName().AnyTimes().Return("my-managedcluster") - s.ResourceGroup().AnyTimes().Return("my-rg") - s.ManagedClusterAnnotations().Times(1).Return(map[string]string{}) - s.ManagedClusterSpec().AnyTimes().Return(azure.ManagedClusterSpec{ - Name: "my-managedcluster", - ResourceGroupName: "my-rg", - }, nil) - }, - }, - } - - for _, tc := range provisioningstatetestcases { - for _, provisioningstate := range tc.provisioningStatesToTest { - t.Logf("Testing managedcluster provision state: " + provisioningstate) - t.Run(tc.name, func(t *testing.T) { - g := NewWithT(t) - - mockCtrl := gomock.NewController(t) - defer mockCtrl.Finish() - scopeMock := mock_managedclusters.NewMockManagedClusterScope(mockCtrl) - clientMock := mock_managedclusters.NewMockClient(mockCtrl) - - tc.expect(clientMock.EXPECT(), provisioningstate, scopeMock.EXPECT()) - - s := &Service{ - Scope: scopeMock, - Client: clientMock, - } - - err := s.Reconcile(context.TODO()) - if tc.expectedError != "" { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(tc.expectedError)) - g.Expect(err.Error()).To(ContainSubstring(provisioningstate)) - } else { - g.Expect(err).NotTo(HaveOccurred()) - } - }) - } - } +var fakeManagedClusterSpec = &ManagedClusterSpec{Name: "my-managedcluster"} +func TestReconcile(t *testing.T) { testcases := []struct { name string expectedError string - expect func(m *mock_managedclusters.MockClientMockRecorder, s *mock_managedclusters.MockManagedClusterScopeMockRecorder) + expect func(m *mock_managedclusters.MockCredentialGetterMockRecorder, s *mock_managedclusters.MockManagedClusterScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) }{ { - name: "no managedcluster exists", + name: "noop if managedcluster spec is nil", expectedError: "", - expect: func(m *mock_managedclusters.MockClientMockRecorder, s *mock_managedclusters.MockManagedClusterScopeMockRecorder) { - m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-managedcluster", gomock.Any(), gomock.Any()).Return(containerservice.ManagedCluster{ManagedClusterProperties: &containerservice.ManagedClusterProperties{ - Fqdn: pointer.String("my-managedcluster-fqdn"), - ProvisioningState: pointer.String("Succeeded"), - }}, nil).Times(1) - m.Get(gomockinternal.AContext(), "my-rg", "my-managedcluster").Return(containerservice.ManagedCluster{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not Found")) - m.GetCredentials(gomockinternal.AContext(), "my-rg", "my-managedcluster").Times(1) - s.ClusterName().AnyTimes().Return("my-managedcluster") - s.ResourceGroup().AnyTimes().Return("my-rg") - s.ManagedClusterAnnotations().Times(1).Return(map[string]string{}) - s.ManagedClusterSpec().AnyTimes().Return(azure.ManagedClusterSpec{ - Name: "my-managedcluster", - ResourceGroupName: "my-rg", - }, nil) - s.GetAllAgentPoolSpecs(gomockinternal.AContext()).AnyTimes().Return([]azure.AgentPoolSpec{ - { - Name: "my-agentpool", - SKU: "Standard_D4s_v3", - Replicas: 1, - OSDiskSizeGB: 0, - }, - }, nil) - s.SetControlPlaneEndpoint(gomock.Eq(clusterv1.APIEndpoint{ - Host: "my-managedcluster-fqdn", - Port: 443, - })).Times(1) - 
s.SetKubeConfigData(gomock.Any()).Times(1)
+ expect: func(m *mock_managedclusters.MockCredentialGetterMockRecorder, s *mock_managedclusters.MockManagedClusterScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) {
+ s.ManagedClusterSpec(gomockinternal.AContext()).Return(nil, nil)
 },
 },
 {
- name: "missing fqdn after create",
- expectedError: "failed to get API endpoint for managed cluster",
- expect: func(m *mock_managedclusters.MockClientMockRecorder, s *mock_managedclusters.MockManagedClusterScopeMockRecorder) {
- m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-managedcluster", gomock.Any(), gomock.Any()).Return(containerservice.ManagedCluster{ManagedClusterProperties: &containerservice.ManagedClusterProperties{}}, nil)
- m.Get(gomockinternal.AContext(), "my-rg", "my-managedcluster").Return(containerservice.ManagedCluster{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not Found"))
- s.ClusterName().AnyTimes().Return("my-managedcluster")
- s.ResourceGroup().AnyTimes().Return("my-rg")
- s.ManagedClusterAnnotations().Times(1).Return(map[string]string{})
- s.ManagedClusterSpec().AnyTimes().Return(azure.ManagedClusterSpec{
- Name: "my-managedcluster",
- ResourceGroupName: "my-rg",
- }, nil)
- s.GetAllAgentPoolSpecs(gomockinternal.AContext()).AnyTimes().Return([]azure.AgentPoolSpec{
- {
- Name: "my-agentpool",
- SKU: "Standard_D4s_v3",
- Replicas: 1,
- OSDiskSizeGB: 0,
- },
- }, nil)
+ name: "fail if managedcluster spec returns an error",
+ expectedError: "failed to get managed cluster spec: foo error",
+ expect: func(m *mock_managedclusters.MockCredentialGetterMockRecorder, s *mock_managedclusters.MockManagedClusterScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) {
+ s.ManagedClusterSpec(gomockinternal.AContext()).Return(nil, errors.New("foo error"))
 },
 },
 {
- name: "set correct ControlPlaneEndpoint using fqdn from existing MC after update",
+ name: "create managed cluster returns an error",
+ expectedError: "some unexpected error occurred",
+ expect: func(m *mock_managedclusters.MockCredentialGetterMockRecorder, s *mock_managedclusters.MockManagedClusterScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) {
+ s.ManagedClusterSpec(gomockinternal.AContext()).Return(fakeManagedClusterSpec, nil)
+ r.CreateResource(gomockinternal.AContext(), fakeManagedClusterSpec, serviceName).Return(nil, errors.New("some unexpected error occurred"))
+ s.UpdatePutStatus(infrav1.ManagedClusterRunningCondition, serviceName, errors.New("some unexpected error occurred"))
+ },
+ },
+ {
+ name: "create managed cluster succeeds",
 expectedError: "",
- expect: func(m *mock_managedclusters.MockClientMockRecorder, s *mock_managedclusters.MockManagedClusterScopeMockRecorder) {
- m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-managedcluster", gomock.Any(), gomock.Any()).Return(containerservice.ManagedCluster{
- ManagedClusterProperties: &containerservice.ManagedClusterProperties{
- Fqdn: pointer.String("my-managedcluster-fqdn"),
- ProvisioningState: pointer.String("Succeeded"),
- },
- }, nil)
- m.Get(gomockinternal.AContext(), "my-rg", "my-managedcluster").Return(containerservice.ManagedCluster{
+ expect: func(m *mock_managedclusters.MockCredentialGetterMockRecorder, s *mock_managedclusters.MockManagedClusterScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) {
+ s.ManagedClusterSpec(gomockinternal.AContext()).Return(fakeManagedClusterSpec, nil)
+ r.CreateResource(gomockinternal.AContext(), fakeManagedClusterSpec,
serviceName).Return(containerservice.ManagedCluster{ ManagedClusterProperties: &containerservice.ManagedClusterProperties{ Fqdn: pointer.String("my-managedcluster-fqdn"), ProvisioningState: pointer.String("Succeeded"), - NetworkProfile: &containerservice.NetworkProfile{}, - }, - }, nil).Times(1) - m.GetCredentials(gomockinternal.AContext(), "my-rg", "my-managedcluster").Times(1) - s.ClusterName().AnyTimes().Return("my-managedcluster") - s.ResourceGroup().AnyTimes().Return("my-rg") - s.ManagedClusterAnnotations().Times(1).Return(map[string]string{}) - s.ManagedClusterSpec().AnyTimes().Return(azure.ManagedClusterSpec{ - Name: "my-managedcluster", - ResourceGroupName: "my-rg", - }, nil) - s.GetAllAgentPoolSpecs(gomockinternal.AContext()).AnyTimes().Return([]azure.AgentPoolSpec{ - { - Name: "my-agentpool", - SKU: "Standard_D4s_v3", - Replicas: 1, - OSDiskSizeGB: 0, }, }, nil) - s.SetControlPlaneEndpoint(gomock.Eq(clusterv1.APIEndpoint{ + s.SetControlPlaneEndpoint(clusterv1.APIEndpoint{ Host: "my-managedcluster-fqdn", Port: 443, - })).Times(1) - s.SetKubeConfigData(gomock.Any()).Times(1) + }) + s.ResourceGroup().Return("my-rg") + s.ClusterName().Return("my-cluster") + m.GetCredentials(gomockinternal.AContext(), "my-rg", "my-cluster").Return([]byte("credentials"), nil) + s.SetKubeConfigData([]byte("credentials")) + s.UpdatePutStatus(infrav1.ManagedClusterRunningCondition, serviceName, nil) }, }, { - name: "no update needed - set correct ControlPlaneEndpoint using fqdn from existing MC", - expectedError: "", - expect: func(m *mock_managedclusters.MockClientMockRecorder, s *mock_managedclusters.MockManagedClusterScopeMockRecorder) { - m.Get(gomockinternal.AContext(), "my-rg", "my-managedcluster").Return(containerservice.ManagedCluster{ + name: "fail to get managed cluster credentials", + expectedError: "failed to get credentials for managed cluster: internal server error", + expect: func(m *mock_managedclusters.MockCredentialGetterMockRecorder, s *mock_managedclusters.MockManagedClusterScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { + s.ManagedClusterSpec(gomockinternal.AContext()).Return(fakeManagedClusterSpec, nil) + r.CreateResource(gomockinternal.AContext(), fakeManagedClusterSpec, serviceName).Return(containerservice.ManagedCluster{ ManagedClusterProperties: &containerservice.ManagedClusterProperties{ Fqdn: pointer.String("my-managedcluster-fqdn"), ProvisioningState: pointer.String("Succeeded"), - KubernetesVersion: pointer.String("1.1"), - NetworkProfile: &containerservice.NetworkProfile{}, - }, - }, nil).Times(1) - m.GetCredentials(gomockinternal.AContext(), "my-rg", "my-managedcluster").Times(1) - s.ClusterName().AnyTimes().Return("my-managedcluster") - s.ResourceGroup().AnyTimes().Return("my-rg") - s.ManagedClusterAnnotations().Times(1).Return(map[string]string{}) - s.ManagedClusterSpec().AnyTimes().Return(azure.ManagedClusterSpec{ - Name: "my-managedcluster", - ResourceGroupName: "my-rg", - Version: "1.1", - }, nil) - s.GetAllAgentPoolSpecs(gomockinternal.AContext()).AnyTimes().Return([]azure.AgentPoolSpec{ - { - Name: "my-agentpool", - SKU: "Standard_D4s_v3", - Replicas: 1, - OSDiskSizeGB: 0, }, }, nil) - s.SetControlPlaneEndpoint(gomock.Eq(clusterv1.APIEndpoint{ + s.SetControlPlaneEndpoint(clusterv1.APIEndpoint{ Host: "my-managedcluster-fqdn", Port: 443, - })).Times(1) - s.SetKubeConfigData(gomock.Any()).Times(1) + }) + s.ResourceGroup().Return("my-rg") + s.ClusterName().Return("my-cluster") + m.GetCredentials(gomockinternal.AContext(), "my-rg", 
"my-cluster").Return([]byte(""), errors.New("internal server error")) }, }, } for _, tc := range testcases { - t.Logf("Testing " + tc.name) + tc := tc t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - + t.Parallel() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() scopeMock := mock_managedclusters.NewMockManagedClusterScope(mockCtrl) - clientMock := mock_managedclusters.NewMockClient(mockCtrl) + credsGetterMock := mock_managedclusters.NewMockCredentialGetter(mockCtrl) + reconcilerMock := mock_async.NewMockReconciler(mockCtrl) - tc.expect(clientMock.EXPECT(), scopeMock.EXPECT()) + tc.expect(credsGetterMock.EXPECT(), scopeMock.EXPECT(), reconcilerMock.EXPECT()) s := &Service{ - Scope: scopeMock, - Client: clientMock, + Scope: scopeMock, + CredentialGetter: credsGetterMock, + Reconciler: reconcilerMock, } err := s.Reconcile(context.TODO()) @@ -281,3 +136,64 @@ func TestReconcile(t *testing.T) { }) } } + +func TestDelete(t *testing.T) { + testcases := []struct { + name string + expectedError string + expect func(s *mock_managedclusters.MockManagedClusterScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) + }{ + { + name: "noop if no managed cluster spec is found", + expectedError: "", + expect: func(s *mock_managedclusters.MockManagedClusterScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { + s.ManagedClusterSpec(gomockinternal.AContext()).Return(nil, nil) + }, + }, + { + name: "successfully delete an existing managed cluster", + expectedError: "", + expect: func(s *mock_managedclusters.MockManagedClusterScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { + s.ManagedClusterSpec(gomockinternal.AContext()).Return(fakeManagedClusterSpec, nil) + r.DeleteResource(gomockinternal.AContext(), fakeManagedClusterSpec, serviceName).Return(nil) + s.UpdateDeleteStatus(infrav1.ManagedClusterRunningCondition, serviceName, nil) + }, + }, + { + name: "managed cluster deletion fails", + expectedError: "internal error", + expect: func(s *mock_managedclusters.MockManagedClusterScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { + s.ManagedClusterSpec(gomockinternal.AContext()).Return(fakeManagedClusterSpec, nil) + r.DeleteResource(gomockinternal.AContext(), fakeManagedClusterSpec, serviceName).Return(errors.New("internal error")) + s.UpdateDeleteStatus(infrav1.ManagedClusterRunningCondition, serviceName, errors.New("internal error")) + }, + }, + } + + for _, tc := range testcases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + t.Parallel() + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + scopeMock := mock_managedclusters.NewMockManagedClusterScope(mockCtrl) + asyncMock := mock_async.NewMockReconciler(mockCtrl) + + tc.expect(scopeMock.EXPECT(), asyncMock.EXPECT()) + + s := &Service{ + Scope: scopeMock, + Reconciler: asyncMock, + } + + err := s.Delete(context.TODO()) + if tc.expectedError != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err).To(MatchError(tc.expectedError)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} diff --git a/azure/services/managedclusters/mock_managedclusters/client_mock.go b/azure/services/managedclusters/mock_managedclusters/client_mock.go index a8353b713a9d..735939603c5c 100644 --- a/azure/services/managedclusters/mock_managedclusters/client_mock.go +++ b/azure/services/managedclusters/mock_managedclusters/client_mock.go @@ -24,79 +24,34 @@ import ( context "context" reflect "reflect" - containerservice 
"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice" gomock "github.com/golang/mock/gomock" ) -// MockClient is a mock of Client interface. -type MockClient struct { +// MockCredentialGetter is a mock of CredentialGetter interface. +type MockCredentialGetter struct { ctrl *gomock.Controller - recorder *MockClientMockRecorder + recorder *MockCredentialGetterMockRecorder } -// MockClientMockRecorder is the mock recorder for MockClient. -type MockClientMockRecorder struct { - mock *MockClient +// MockCredentialGetterMockRecorder is the mock recorder for MockCredentialGetter. +type MockCredentialGetterMockRecorder struct { + mock *MockCredentialGetter } -// NewMockClient creates a new mock instance. -func NewMockClient(ctrl *gomock.Controller) *MockClient { - mock := &MockClient{ctrl: ctrl} - mock.recorder = &MockClientMockRecorder{mock} +// NewMockCredentialGetter creates a new mock instance. +func NewMockCredentialGetter(ctrl *gomock.Controller) *MockCredentialGetter { + mock := &MockCredentialGetter{ctrl: ctrl} + mock.recorder = &MockCredentialGetterMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockClient) EXPECT() *MockClientMockRecorder { +func (m *MockCredentialGetter) EXPECT() *MockCredentialGetterMockRecorder { return m.recorder } -// CreateOrUpdate mocks base method. -func (m *MockClient) CreateOrUpdate(arg0 context.Context, arg1, arg2 string, arg3 containerservice.ManagedCluster, arg4 map[string]string) (containerservice.ManagedCluster, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateOrUpdate", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(containerservice.ManagedCluster) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockClientMockRecorder) CreateOrUpdate(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockClient)(nil).CreateOrUpdate), arg0, arg1, arg2, arg3, arg4) -} - -// Delete mocks base method. -func (m *MockClient) Delete(arg0 context.Context, arg1, arg2 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// Delete indicates an expected call of Delete. -func (mr *MockClientMockRecorder) Delete(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockClient)(nil).Delete), arg0, arg1, arg2) -} - -// Get mocks base method. -func (m *MockClient) Get(arg0 context.Context, arg1, arg2 string) (containerservice.ManagedCluster, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2) - ret0, _ := ret[0].(containerservice.ManagedCluster) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Get indicates an expected call of Get. -func (mr *MockClientMockRecorder) Get(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockClient)(nil).Get), arg0, arg1, arg2) -} - // GetCredentials mocks base method. 
-func (m *MockClient) GetCredentials(arg0 context.Context, arg1, arg2 string) ([]byte, error) { +func (m *MockCredentialGetter) GetCredentials(arg0 context.Context, arg1, arg2 string) ([]byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetCredentials", arg0, arg1, arg2) ret0, _ := ret[0].([]byte) @@ -105,7 +60,7 @@ func (m *MockClient) GetCredentials(arg0 context.Context, arg1, arg2 string) ([] } // GetCredentials indicates an expected call of GetCredentials. -func (mr *MockClientMockRecorder) GetCredentials(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockCredentialGetterMockRecorder) GetCredentials(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCredentials", reflect.TypeOf((*MockClient)(nil).GetCredentials), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCredentials", reflect.TypeOf((*MockCredentialGetter)(nil).GetCredentials), arg0, arg1, arg2) } diff --git a/azure/services/managedclusters/mock_managedclusters/doc.go b/azure/services/managedclusters/mock_managedclusters/doc.go index 4ce8a4e79ae6..0a4606ca749f 100644 --- a/azure/services/managedclusters/mock_managedclusters/doc.go +++ b/azure/services/managedclusters/mock_managedclusters/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // Run go generate to regenerate this mock. -//go:generate ../../../../hack/tools/bin/mockgen -destination client_mock.go -package mock_managedclusters -source ../client.go Client +//go:generate ../../../../hack/tools/bin/mockgen -destination client_mock.go -package mock_managedclusters -source ../client.go CredentialGetter //go:generate ../../../../hack/tools/bin/mockgen -destination managedclusters_mock.go -package mock_managedclusters -source ../managedclusters.go ManagedClusterScope //go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt client_mock.go > _client_mock.go && mv _client_mock.go client_mock.go" //go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt managedclusters_mock.go > _managedclusters_mock.go && mv _managedclusters_mock.go managedclusters_mock.go" diff --git a/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go b/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go index 6dc606e5652c..a18cc15ac829 100644 --- a/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go +++ b/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go @@ -181,6 +181,18 @@ func (mr *MockManagedClusterScopeMockRecorder) ClusterName() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterName", reflect.TypeOf((*MockManagedClusterScope)(nil).ClusterName)) } +// DeleteLongRunningOperationState mocks base method. +func (m *MockManagedClusterScope) DeleteLongRunningOperationState(arg0, arg1 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteLongRunningOperationState", arg0, arg1) +} + +// DeleteLongRunningOperationState indicates an expected call of DeleteLongRunningOperationState. +func (mr *MockManagedClusterScopeMockRecorder) DeleteLongRunningOperationState(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLongRunningOperationState", reflect.TypeOf((*MockManagedClusterScope)(nil).DeleteLongRunningOperationState), arg0, arg1) +} + // FailureDomains mocks base method. 
func (m *MockManagedClusterScope) FailureDomains() []string { m.ctrl.T.Helper() @@ -195,21 +207,6 @@ func (mr *MockManagedClusterScopeMockRecorder) FailureDomains() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FailureDomains", reflect.TypeOf((*MockManagedClusterScope)(nil).FailureDomains)) } -// GetAllAgentPoolSpecs mocks base method. -func (m *MockManagedClusterScope) GetAllAgentPoolSpecs(ctx context.Context) ([]azure.AgentPoolSpec, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAllAgentPoolSpecs", ctx) - ret0, _ := ret[0].([]azure.AgentPoolSpec) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAllAgentPoolSpecs indicates an expected call of GetAllAgentPoolSpecs. -func (mr *MockManagedClusterScopeMockRecorder) GetAllAgentPoolSpecs(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllAgentPoolSpecs", reflect.TypeOf((*MockManagedClusterScope)(nil).GetAllAgentPoolSpecs), ctx) -} - // GetKubeConfigData mocks base method. func (m *MockManagedClusterScope) GetKubeConfigData() []byte { m.ctrl.T.Helper() @@ -224,6 +221,20 @@ func (mr *MockManagedClusterScopeMockRecorder) GetKubeConfigData() *gomock.Call return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetKubeConfigData", reflect.TypeOf((*MockManagedClusterScope)(nil).GetKubeConfigData)) } +// GetLongRunningOperationState mocks base method. +func (m *MockManagedClusterScope) GetLongRunningOperationState(arg0, arg1 string) *v1beta1.Future { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLongRunningOperationState", arg0, arg1) + ret0, _ := ret[0].(*v1beta1.Future) + return ret0 +} + +// GetLongRunningOperationState indicates an expected call of GetLongRunningOperationState. +func (mr *MockManagedClusterScopeMockRecorder) GetLongRunningOperationState(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLongRunningOperationState", reflect.TypeOf((*MockManagedClusterScope)(nil).GetLongRunningOperationState), arg0, arg1) +} + // HashKey mocks base method. func (m *MockManagedClusterScope) HashKey() string { m.ctrl.T.Helper() @@ -266,33 +277,19 @@ func (mr *MockManagedClusterScopeMockRecorder) MakeEmptyKubeConfigSecret() *gomo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MakeEmptyKubeConfigSecret", reflect.TypeOf((*MockManagedClusterScope)(nil).MakeEmptyKubeConfigSecret)) } -// ManagedClusterAnnotations mocks base method. -func (m *MockManagedClusterScope) ManagedClusterAnnotations() map[string]string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ManagedClusterAnnotations") - ret0, _ := ret[0].(map[string]string) - return ret0 -} - -// ManagedClusterAnnotations indicates an expected call of ManagedClusterAnnotations. -func (mr *MockManagedClusterScopeMockRecorder) ManagedClusterAnnotations() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ManagedClusterAnnotations", reflect.TypeOf((*MockManagedClusterScope)(nil).ManagedClusterAnnotations)) -} - // ManagedClusterSpec mocks base method. 
-func (m *MockManagedClusterScope) ManagedClusterSpec() (azure.ManagedClusterSpec, error) { +func (m *MockManagedClusterScope) ManagedClusterSpec(arg0 context.Context) (azure.ResourceSpecGetter, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ManagedClusterSpec") - ret0, _ := ret[0].(azure.ManagedClusterSpec) + ret := m.ctrl.Call(m, "ManagedClusterSpec", arg0) + ret0, _ := ret[0].(azure.ResourceSpecGetter) ret1, _ := ret[1].(error) return ret0, ret1 } // ManagedClusterSpec indicates an expected call of ManagedClusterSpec. -func (mr *MockManagedClusterScopeMockRecorder) ManagedClusterSpec() *gomock.Call { +func (mr *MockManagedClusterScopeMockRecorder) ManagedClusterSpec(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ManagedClusterSpec", reflect.TypeOf((*MockManagedClusterScope)(nil).ManagedClusterSpec)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ManagedClusterSpec", reflect.TypeOf((*MockManagedClusterScope)(nil).ManagedClusterSpec), arg0) } // ResourceGroup mocks base method. @@ -333,6 +330,18 @@ func (mr *MockManagedClusterScopeMockRecorder) SetKubeConfigData(arg0 interface{ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetKubeConfigData", reflect.TypeOf((*MockManagedClusterScope)(nil).SetKubeConfigData), arg0) } +// SetLongRunningOperationState mocks base method. +func (m *MockManagedClusterScope) SetLongRunningOperationState(arg0 *v1beta1.Future) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetLongRunningOperationState", arg0) +} + +// SetLongRunningOperationState indicates an expected call of SetLongRunningOperationState. +func (mr *MockManagedClusterScopeMockRecorder) SetLongRunningOperationState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLongRunningOperationState", reflect.TypeOf((*MockManagedClusterScope)(nil).SetLongRunningOperationState), arg0) +} + // SubscriptionID mocks base method. func (m *MockManagedClusterScope) SubscriptionID() string { m.ctrl.T.Helper() @@ -360,3 +369,39 @@ func (mr *MockManagedClusterScopeMockRecorder) TenantID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockManagedClusterScope)(nil).TenantID)) } + +// UpdateDeleteStatus mocks base method. +func (m *MockManagedClusterScope) UpdateDeleteStatus(arg0 v1beta10.ConditionType, arg1 string, arg2 error) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "UpdateDeleteStatus", arg0, arg1, arg2) +} + +// UpdateDeleteStatus indicates an expected call of UpdateDeleteStatus. +func (mr *MockManagedClusterScopeMockRecorder) UpdateDeleteStatus(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDeleteStatus", reflect.TypeOf((*MockManagedClusterScope)(nil).UpdateDeleteStatus), arg0, arg1, arg2) +} + +// UpdatePatchStatus mocks base method. +func (m *MockManagedClusterScope) UpdatePatchStatus(arg0 v1beta10.ConditionType, arg1 string, arg2 error) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "UpdatePatchStatus", arg0, arg1, arg2) +} + +// UpdatePatchStatus indicates an expected call of UpdatePatchStatus. 
+func (mr *MockManagedClusterScopeMockRecorder) UpdatePatchStatus(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePatchStatus", reflect.TypeOf((*MockManagedClusterScope)(nil).UpdatePatchStatus), arg0, arg1, arg2) +} + +// UpdatePutStatus mocks base method. +func (m *MockManagedClusterScope) UpdatePutStatus(arg0 v1beta10.ConditionType, arg1 string, arg2 error) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "UpdatePutStatus", arg0, arg1, arg2) +} + +// UpdatePutStatus indicates an expected call of UpdatePutStatus. +func (mr *MockManagedClusterScopeMockRecorder) UpdatePutStatus(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePutStatus", reflect.TypeOf((*MockManagedClusterScope)(nil).UpdatePutStatus), arg0, arg1, arg2) +} diff --git a/azure/services/managedclusters/spec.go b/azure/services/managedclusters/spec.go new file mode 100644 index 000000000000..8bed6ee05cd3 --- /dev/null +++ b/azure/services/managedclusters/spec.go @@ -0,0 +1,424 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedclusters + +import ( + "fmt" + "net" + "time" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice" + "github.com/Azure/go-autorest/autorest/to" + "github.com/google/go-cmp/cmp" + "github.com/pkg/errors" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" +) + +// ManagedClusterSpec contains properties to create a managed cluster. +type ManagedClusterSpec struct { + // Name is the name of this AKS Cluster. + Name string + + // ResourceGroup is the name of the Azure resource group for this AKS Cluster. + ResourceGroup string + + // NodeResourceGroupName is the name of the Azure resource group containing IaaS VMs. + NodeResourceGroupName string + + // VnetSubnetID is the Azure Resource ID for the subnet which should contain nodes. + VnetSubnetID string + + // Location is a string matching one of the canonical Azure region names. Examples: "westus2", "eastus". + Location string + + // Tags is a set of tags to add to this cluster. + Tags map[string]string + + // Version defines the desired Kubernetes version. + Version string + + // LoadBalancerSKU for the managed cluster. Possible values include: 'Standard', 'Basic'. Defaults to Standard. + LoadBalancerSKU string + + // NetworkPlugin used for building Kubernetes network. Possible values include: 'azure', 'kubenet'. Defaults to azure. + NetworkPlugin string + + // NetworkPolicy used for building Kubernetes network. Possible values include: 'calico', 'azure'. Defaults to azure. + NetworkPolicy string + + // SSHPublicKey is a string literal containing an ssh public key. Will autogenerate and discard if not provided. + SSHPublicKey string + + // AgentPools is the list of agent pool specifications in this cluster. 
+ AgentPools []azure.AgentPoolSpec + + // PodCIDR is the CIDR block for IP addresses distributed to pods + PodCIDR string + + // ServiceCIDR is the CIDR block for IP addresses distributed to services + ServiceCIDR string + + // DNSServiceIP is an IP address assigned to the Kubernetes DNS service + DNSServiceIP *string + + // AddonProfiles are the profiles of managed cluster add-on. + AddonProfiles []AddonProfile + + // AADProfile is Azure Active Directory configuration to integrate with AKS, for aad authentication. + AADProfile *AADProfile + + // SKU is the SKU of the AKS to be provisioned. + SKU *SKU + + // LoadBalancerProfile is the profile of the cluster load balancer. + LoadBalancerProfile *LoadBalancerProfile + + // APIServerAccessProfile is the access profile for AKS API server. + APIServerAccessProfile *APIServerAccessProfile + + // Headers is the list of headers to add to the HTTP requests to update this resource. + Headers map[string]string +} + +// AADProfile is Azure Active Directory configuration to integrate with AKS, for aad authentication. +type AADProfile struct { + // Managed - Whether to enable managed AAD. + Managed bool + + // EnableAzureRBAC - Whether to enable Azure RBAC for Kubernetes authorization. + EnableAzureRBAC bool + + // AdminGroupObjectIDs - AAD group object IDs that will have admin role of the cluster. + AdminGroupObjectIDs []string +} + +// AddonProfile - Profile of managed cluster add-on. +type AddonProfile struct { + Name string + Config map[string]string + Enabled bool +} + +// SKU - AKS SKU. +type SKU struct { + // Tier - Tier of a managed cluster SKU. + Tier string +} + +// LoadBalancerProfile - Profile of the cluster load balancer. +type LoadBalancerProfile struct { + // Load balancer profile must specify at most one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs. + // By default the AKS cluster automatically creates a public IP in the AKS-managed infrastructure resource group and assigns it to the load balancer outbound pool. + // Alternatively, you can assign your own custom public IP or public IP prefix at cluster creation time. + // See https://docs.microsoft.com/en-us/azure/aks/load-balancer-standard#provide-your-own-outbound-public-ips-or-prefixes + + // ManagedOutboundIPs - Desired managed outbound IPs for the cluster load balancer. + ManagedOutboundIPs *int32 + + // OutboundIPPrefixes - Desired outbound IP Prefix resources for the cluster load balancer. + OutboundIPPrefixes []string + + // OutboundIPs - Desired outbound IP resources for the cluster load balancer. + OutboundIPs []string + + // AllocatedOutboundPorts - Desired number of allocated SNAT ports per VM. Allowed values must be in the range of 0 to 64000 (inclusive). The default value is 0 which results in Azure dynamically allocating ports. + AllocatedOutboundPorts *int32 + + // IdleTimeoutInMinutes - Desired outbound flow idle timeout in minutes. Allowed values must be in the range of 4 to 120 (inclusive). The default value is 30 minutes. + IdleTimeoutInMinutes *int32 +} + +// APIServerAccessProfile is the access profile for AKS API server. +type APIServerAccessProfile struct { + // AuthorizedIPRanges - Authorized IP Ranges to kubernetes API server. + AuthorizedIPRanges []string + // EnablePrivateCluster - Whether to create the cluster as a private cluster or not. + EnablePrivateCluster *bool + // PrivateDNSZone - Private dns zone mode for private cluster. 
+	PrivateDNSZone *string
+	// EnablePrivateClusterPublicFQDN - Whether to create additional public FQDN for private cluster or not.
+	EnablePrivateClusterPublicFQDN *bool
+}
+
+// ResourceName returns the name of the AKS cluster.
+func (s *ManagedClusterSpec) ResourceName() string {
+	return s.Name
+}
+
+// ResourceGroupName returns the name of the resource group the AKS cluster is in.
+func (s *ManagedClusterSpec) ResourceGroupName() string {
+	return s.ResourceGroup
+}
+
+// OwnerResourceName is a no-op for managed clusters.
+func (s *ManagedClusterSpec) OwnerResourceName() string {
+	return "" // not applicable
+}
+
+// CustomHeaders returns custom headers to be added to the Azure API calls.
+func (s *ManagedClusterSpec) CustomHeaders() map[string]string {
+	return s.Headers
+}
+
+// Parameters returns the parameters for the managed cluster.
+func (s *ManagedClusterSpec) Parameters(existing interface{}) (params interface{}, err error) {
+	managedCluster := containerservice.ManagedCluster{
+		Identity: &containerservice.ManagedClusterIdentity{
+			Type: containerservice.ResourceIdentityTypeSystemAssigned,
+		},
+		Location: &s.Location,
+		Tags:     *to.StringMapPtr(s.Tags),
+		ManagedClusterProperties: &containerservice.ManagedClusterProperties{
+			NodeResourceGroup: &s.NodeResourceGroupName,
+			EnableRBAC:        to.BoolPtr(true),
+			DNSPrefix:         &s.Name,
+			KubernetesVersion: &s.Version,
+			LinuxProfile: &containerservice.LinuxProfile{
+				AdminUsername: to.StringPtr("azureuser"),
+				SSH: &containerservice.SSHConfiguration{
+					PublicKeys: &[]containerservice.SSHPublicKey{
+						{
+							KeyData: &s.SSHPublicKey,
+						},
+					},
+				},
+			},
+			ServicePrincipalProfile: &containerservice.ManagedClusterServicePrincipalProfile{
+				ClientID: to.StringPtr("msi"),
+			},
+			AgentPoolProfiles: &[]containerservice.ManagedClusterAgentPoolProfile{},
+			NetworkProfile: &containerservice.NetworkProfile{
+				NetworkPlugin:   containerservice.NetworkPlugin(s.NetworkPlugin),
+				LoadBalancerSku: containerservice.LoadBalancerSku(s.LoadBalancerSKU),
+				NetworkPolicy:   containerservice.NetworkPolicy(s.NetworkPolicy),
+			},
+		},
+	}
+
+	if s.PodCIDR != "" {
+		managedCluster.NetworkProfile.PodCidr = &s.PodCIDR
+	}
+
+	if s.ServiceCIDR != "" {
+		if s.DNSServiceIP == nil {
+			managedCluster.NetworkProfile.ServiceCidr = &s.ServiceCIDR
+			ip, _, err := net.ParseCIDR(s.ServiceCIDR)
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse service cidr: %w", err)
+			}
+			// HACK: set the last octet of the IP to .10
+			// This ensures the DNS IP is valid in the service cidr without forcing the user
+			// to specify it in both the CAPI cluster and the Azure control plane.
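+			// For example, with a ServiceCIDR of "10.0.0.0/16", the parsed IP is
+			// "10.0.0.0" and the derived DNS service IP becomes "10.0.0.10".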
+ // https://golang.org/src/net/ip.go#L48 + ip[15] = byte(10) + dnsIP := ip.String() + managedCluster.NetworkProfile.DNSServiceIP = &dnsIP + } else { + managedCluster.NetworkProfile.DNSServiceIP = s.DNSServiceIP + } + } + + for i := range s.AgentPools { + pool := s.AgentPools[i] + profile := containerservice.ManagedClusterAgentPoolProfile{ + Name: &pool.Name, + VMSize: &pool.SKU, + OsDiskSizeGB: &pool.OSDiskSizeGB, + Count: &pool.Replicas, + Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, + VnetSubnetID: &s.VnetSubnetID, + Mode: containerservice.AgentPoolMode(pool.Mode), + AvailabilityZones: &pool.AvailabilityZones, + MaxPods: pool.MaxPods, + OrchestratorVersion: pool.Version, + OsDiskType: containerservice.OSDiskType(to.String(pool.OsDiskType)), + } + *managedCluster.AgentPoolProfiles = append(*managedCluster.AgentPoolProfiles, profile) + } + + if s.AADProfile != nil { + managedCluster.AadProfile = &containerservice.ManagedClusterAADProfile{ + Managed: &s.AADProfile.Managed, + EnableAzureRBAC: &s.AADProfile.EnableAzureRBAC, + AdminGroupObjectIDs: &s.AADProfile.AdminGroupObjectIDs, + } + } + + for i := range s.AddonProfiles { + if managedCluster.AddonProfiles == nil { + managedCluster.AddonProfiles = map[string]*containerservice.ManagedClusterAddonProfile{} + } + item := s.AddonProfiles[i] + addonProfile := &containerservice.ManagedClusterAddonProfile{ + Enabled: &item.Enabled, + } + if item.Config != nil { + addonProfile.Config = *to.StringMapPtr(item.Config) + } + managedCluster.AddonProfiles[item.Name] = addonProfile + } + + if s.SKU != nil { + tierName := containerservice.ManagedClusterSKUTier(s.SKU.Tier) + managedCluster.Sku = &containerservice.ManagedClusterSKU{ + Name: containerservice.ManagedClusterSKUNameBasic, + Tier: tierName, + } + } + + if s.LoadBalancerProfile != nil { + managedCluster.NetworkProfile.LoadBalancerProfile = &containerservice.ManagedClusterLoadBalancerProfile{ + AllocatedOutboundPorts: s.LoadBalancerProfile.AllocatedOutboundPorts, + IdleTimeoutInMinutes: s.LoadBalancerProfile.IdleTimeoutInMinutes, + } + if s.LoadBalancerProfile.ManagedOutboundIPs != nil { + managedCluster.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{Count: s.LoadBalancerProfile.ManagedOutboundIPs} + } + if len(s.LoadBalancerProfile.OutboundIPPrefixes) > 0 { + managedCluster.NetworkProfile.LoadBalancerProfile.OutboundIPPrefixes = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPPrefixes{ + PublicIPPrefixes: convertToResourceReferences(s.LoadBalancerProfile.OutboundIPPrefixes), + } + } + if len(s.LoadBalancerProfile.OutboundIPs) > 0 { + managedCluster.NetworkProfile.LoadBalancerProfile.OutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPs{ + PublicIPs: convertToResourceReferences(s.LoadBalancerProfile.OutboundIPs), + } + } + } + + if s.APIServerAccessProfile != nil { + managedCluster.APIServerAccessProfile = &containerservice.ManagedClusterAPIServerAccessProfile{ + AuthorizedIPRanges: &s.APIServerAccessProfile.AuthorizedIPRanges, + EnablePrivateCluster: s.APIServerAccessProfile.EnablePrivateCluster, + PrivateDNSZone: s.APIServerAccessProfile.PrivateDNSZone, + EnablePrivateClusterPublicFQDN: s.APIServerAccessProfile.EnablePrivateClusterPublicFQDN, + } + } + + if existing != nil { + existingMC, ok := existing.(containerservice.ManagedCluster) + if !ok { + return nil, fmt.Errorf("%T is not a containerservice.ManagedCluster", existing) + } + ps := 
*existingMC.ManagedClusterProperties.ProvisioningState + if ps != string(infrav1.Canceled) && ps != string(infrav1.Failed) && ps != string(infrav1.Succeeded) { + return nil, azure.WithTransientError(errors.Errorf("Unable to update existing managed cluster in non-terminal state. Managed cluster must be in one of the following provisioning states: Canceled, Failed, or Succeeded. Actual state: %s", ps), 20*time.Second) + } + + // Normalize the LoadBalancerProfile so the diff below doesn't get thrown off by AKS added properties. + if managedCluster.NetworkProfile.LoadBalancerProfile == nil { + // If our LoadBalancerProfile generated by the spec is nil, then don't worry about what AKS has added. + existingMC.NetworkProfile.LoadBalancerProfile = nil + } else { + // If our LoadBalancerProfile generated by the spec is not nil, then remove the effective outbound IPs from + // AKS. + existingMC.NetworkProfile.LoadBalancerProfile.EffectiveOutboundIPs = nil + } + + // Avoid changing agent pool profiles through AMCP and just use the existing agent pool profiles + // AgentPool changes are managed through AMMP. + managedCluster.AgentPoolProfiles = existingMC.AgentPoolProfiles + + diff := computeDiffOfNormalizedClusters(managedCluster, existingMC) + if diff == "" { + return nil, nil + } + } + + return managedCluster, nil +} + +func convertToResourceReferences(resources []string) *[]containerservice.ResourceReference { + resourceReferences := make([]containerservice.ResourceReference, len(resources)) + for i := range resources { + resourceReferences[i] = containerservice.ResourceReference{ID: &resources[i]} + } + return &resourceReferences +} + +func computeDiffOfNormalizedClusters(managedCluster containerservice.ManagedCluster, existingMC containerservice.ManagedCluster) string { + // Normalize properties for the desired (CR spec) and existing managed + // cluster, so that we check only those fields that were specified in + // the initial CreateOrUpdate request and that can be modified. + // Without comparing to normalized properties, we would always get a + // difference in desired and existing, which would result in sending + // unnecessary Azure API requests. 
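+	// For example, AKS populates LoadBalancerProfile.EffectiveOutboundIPs on the
+	// existing cluster even though the desired spec never sets it, so comparing
+	// the unnormalized objects would always report a diff.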
+ propertiesNormalized := &containerservice.ManagedClusterProperties{ + KubernetesVersion: managedCluster.ManagedClusterProperties.KubernetesVersion, + NetworkProfile: &containerservice.NetworkProfile{}, + } + + existingMCPropertiesNormalized := &containerservice.ManagedClusterProperties{ + KubernetesVersion: existingMC.ManagedClusterProperties.KubernetesVersion, + NetworkProfile: &containerservice.NetworkProfile{}, + } + + if managedCluster.AadProfile != nil { + propertiesNormalized.AadProfile = &containerservice.ManagedClusterAADProfile{ + Managed: managedCluster.AadProfile.Managed, + EnableAzureRBAC: managedCluster.AadProfile.EnableAzureRBAC, + AdminGroupObjectIDs: managedCluster.AadProfile.AdminGroupObjectIDs, + } + } + + if existingMC.AadProfile != nil { + existingMCPropertiesNormalized.AadProfile = &containerservice.ManagedClusterAADProfile{ + Managed: existingMC.AadProfile.Managed, + EnableAzureRBAC: existingMC.AadProfile.EnableAzureRBAC, + AdminGroupObjectIDs: existingMC.AadProfile.AdminGroupObjectIDs, + } + } + + if managedCluster.NetworkProfile != nil { + propertiesNormalized.NetworkProfile.LoadBalancerProfile = managedCluster.NetworkProfile.LoadBalancerProfile + } + + if existingMC.NetworkProfile != nil { + existingMCPropertiesNormalized.NetworkProfile.LoadBalancerProfile = existingMC.NetworkProfile.LoadBalancerProfile + } + + if managedCluster.APIServerAccessProfile != nil { + propertiesNormalized.APIServerAccessProfile = &containerservice.ManagedClusterAPIServerAccessProfile{ + AuthorizedIPRanges: managedCluster.APIServerAccessProfile.AuthorizedIPRanges, + } + } + + if existingMC.APIServerAccessProfile != nil { + existingMCPropertiesNormalized.APIServerAccessProfile = &containerservice.ManagedClusterAPIServerAccessProfile{ + AuthorizedIPRanges: existingMC.APIServerAccessProfile.AuthorizedIPRanges, + } + } + + clusterNormalized := &containerservice.ManagedCluster{ + ManagedClusterProperties: propertiesNormalized, + } + existingMCClusterNormalized := &containerservice.ManagedCluster{ + ManagedClusterProperties: existingMCPropertiesNormalized, + } + + if managedCluster.Sku != nil { + clusterNormalized.Sku = managedCluster.Sku + } + if existingMC.Sku != nil { + existingMCClusterNormalized.Sku = existingMC.Sku + } + + diff := cmp.Diff(clusterNormalized, existingMCClusterNormalized) + return diff +} diff --git a/azure/services/managedclusters/spec_test.go b/azure/services/managedclusters/spec_test.go new file mode 100644 index 000000000000..79f639c63ac5 --- /dev/null +++ b/azure/services/managedclusters/spec_test.go @@ -0,0 +1,164 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedclusters + +import ( + "testing" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice" + "github.com/Azure/go-autorest/autorest/to" + "github.com/google/go-cmp/cmp" + . 
"github.com/onsi/gomega" + gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" +) + +func TestParameters(t *testing.T) { + testcases := []struct { + name string + spec *ManagedClusterSpec + existing interface{} + expectedError string + expect func(g *WithT, result interface{}) + }{ + { + name: "managedcluster in non-terminal provisioning state", + existing: containerservice.ManagedCluster{ + ManagedClusterProperties: &containerservice.ManagedClusterProperties{ + ProvisioningState: to.StringPtr("Deleting"), + }, + }, + spec: &ManagedClusterSpec{ + Name: "test-managedcluster", + }, + expect: func(g *WithT, result interface{}) { + g.Expect(result).To(BeNil()) + }, + expectedError: "Unable to update existing managed cluster in non-terminal state. Managed cluster must be in one of the following provisioning states: Canceled, Failed, or Succeeded. Actual state: Deleting. Object will be requeued after 20s", + }, + { + name: "managedcluster does not exist", + existing: nil, + spec: &ManagedClusterSpec{ + Name: "test-managedcluster", + ResourceGroup: "test-rg", + NodeResourceGroupName: "test-node-rg", + Location: "test-location", + Tags: map[string]string{ + "test-tag": "test-value", + }, + Version: "v1.22.0", + LoadBalancerSKU: "Standard", + }, + expect: func(g *WithT, result interface{}) { + g.Expect(result).To(BeAssignableToTypeOf(containerservice.ManagedCluster{})) + g.Expect(gomockinternal.DiffEq(result).Matches(getSampleManagedCluster())).To(BeTrue(), cmp.Diff(result, getSampleManagedCluster())) + }, + }, + { + name: "managedcluster exists, no update needed", + existing: getExistingCluster(), + spec: &ManagedClusterSpec{ + Name: "test-managedcluster", + ResourceGroup: "test-rg", + Location: "test-location", + Tags: map[string]string{ + "test-tag": "test-value", + }, + Version: "v1.22.0", + LoadBalancerSKU: "Standard", + }, + expect: func(g *WithT, result interface{}) { + g.Expect(result).To(BeNil()) + }, + }, + { + name: "managedcluster exists and an update is needed", + existing: getExistingCluster(), + spec: &ManagedClusterSpec{ + Name: "test-managedcluster", + ResourceGroup: "test-rg", + Location: "test-location", + Tags: map[string]string{ + "test-tag": "test-value", + }, + Version: "v1.22.99", + LoadBalancerSKU: "Standard", + }, + expect: func(g *WithT, result interface{}) { + g.Expect(result).To(BeAssignableToTypeOf(containerservice.ManagedCluster{})) + g.Expect(result.(containerservice.ManagedCluster).KubernetesVersion).To(Equal(to.StringPtr("v1.22.99"))) + }, + }, + // TODO (cecile): add tests for specific parameters + } + for _, tc := range testcases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + t.Parallel() + + result, err := tc.spec.Parameters(tc.existing) + if tc.expectedError != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err).To(MatchError(tc.expectedError)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + tc.expect(g, result) + }) + } +} + +func getExistingCluster() containerservice.ManagedCluster { + mc := getSampleManagedCluster() + mc.ProvisioningState = to.StringPtr("Succeeded") + mc.ID = to.StringPtr("test-id") + return mc +} + +func getSampleManagedCluster() containerservice.ManagedCluster { + return containerservice.ManagedCluster{ + ManagedClusterProperties: &containerservice.ManagedClusterProperties{ + KubernetesVersion: to.StringPtr("v1.22.0"), + DNSPrefix: to.StringPtr("test-managedcluster"), + AgentPoolProfiles: &[]containerservice.ManagedClusterAgentPoolProfile{}, + LinuxProfile: 
&containerservice.LinuxProfile{ + AdminUsername: to.StringPtr("azureuser"), + SSH: &containerservice.SSHConfiguration{ + PublicKeys: &[]containerservice.SSHPublicKey{ + { + KeyData: to.StringPtr(""), + }, + }, + }, + }, + ServicePrincipalProfile: &containerservice.ManagedClusterServicePrincipalProfile{ClientID: to.StringPtr("msi")}, + NodeResourceGroup: to.StringPtr("test-node-rg"), + EnableRBAC: to.BoolPtr(true), + NetworkProfile: &containerservice.NetworkProfile{ + LoadBalancerSku: containerservice.LoadBalancerSku("Standard"), + }, + }, + Identity: &containerservice.ManagedClusterIdentity{ + Type: containerservice.ResourceIdentityTypeSystemAssigned, + }, + Location: to.StringPtr("test-location"), + Tags: map[string]*string{ + "test-tag": to.StringPtr("test-value"), + }, + } +} diff --git a/azure/services/natgateways/spec.go b/azure/services/natgateways/spec.go index 4cf0ee384fd9..b8a7cb414048 100644 --- a/azure/services/natgateways/spec.go +++ b/azure/services/natgateways/spec.go @@ -49,6 +49,11 @@ func (s *NatGatewaySpec) OwnerResourceName() string { return "" } +// CustomHeaders is not implemented for NAT gateways. +func (s *NatGatewaySpec) CustomHeaders() map[string]string { + return nil +} + // Parameters returns the parameters for the NAT gateway. func (s *NatGatewaySpec) Parameters(existing interface{}) (params interface{}, err error) { if existing != nil { diff --git a/azure/services/networkinterfaces/spec.go b/azure/services/networkinterfaces/spec.go index c926f35b3ead..97d2706b0eaf 100644 --- a/azure/services/networkinterfaces/spec.go +++ b/azure/services/networkinterfaces/spec.go @@ -62,6 +62,11 @@ func (s *NICSpec) OwnerResourceName() string { return "" } +// CustomHeaders is not implemented for network interfaces. +func (s *NICSpec) CustomHeaders() map[string]string { + return nil +} + // Parameters returns the parameters for the network interface. func (s *NICSpec) Parameters(existing interface{}) (parameters interface{}, err error) { if existing != nil { diff --git a/azure/services/routetables/spec.go b/azure/services/routetables/spec.go index 1a96099e0264..b679abb92649 100644 --- a/azure/services/routetables/spec.go +++ b/azure/services/routetables/spec.go @@ -44,6 +44,11 @@ func (s *RouteTableSpec) OwnerResourceName() string { return "" } +// CustomHeaders is not implemented for route tables. +func (s *RouteTableSpec) CustomHeaders() map[string]string { + return nil +} + // Parameters returns the parameters for the route table. func (s *RouteTableSpec) Parameters(existing interface{}) (params interface{}, err error) { if existing != nil { diff --git a/azure/services/securitygroups/spec.go b/azure/services/securitygroups/spec.go index 857f2f1605fc..851e1aa45f61 100644 --- a/azure/services/securitygroups/spec.go +++ b/azure/services/securitygroups/spec.go @@ -49,6 +49,11 @@ func (s *NSGSpec) OwnerResourceName() string { return "" } +// CustomHeaders is not implemented for security groups. +func (s *NSGSpec) CustomHeaders() map[string]string { + return nil +} + // Parameters returns the parameters for the security group. func (s *NSGSpec) Parameters(existing interface{}) (interface{}, error) { securityRules := make([]network.SecurityRule, 0) diff --git a/azure/services/subnets/spec.go b/azure/services/subnets/spec.go index e281412bead5..e30dbf6fd69d 100644 --- a/azure/services/subnets/spec.go +++ b/azure/services/subnets/spec.go @@ -54,6 +54,11 @@ func (s *SubnetSpec) OwnerResourceName() string { return s.VNetName } +// CustomHeaders is not implemented for subnets. 
+func (s *SubnetSpec) CustomHeaders() map[string]string { + return nil +} + // Parameters returns the parameters for the subnet. func (s *SubnetSpec) Parameters(existing interface{}) (parameters interface{}, err error) { if existing != nil { diff --git a/azure/services/virtualmachines/spec.go b/azure/services/virtualmachines/spec.go index 5cf4caca8f92..ab88b48d40a9 100644 --- a/azure/services/virtualmachines/spec.go +++ b/azure/services/virtualmachines/spec.go @@ -70,6 +70,11 @@ func (s *VMSpec) OwnerResourceName() string { return "" } +// CustomHeaders is not implemented for virtual machines. +func (s *VMSpec) CustomHeaders() map[string]string { + return nil +} + // Parameters returns the parameters for the virtual machine. func (s *VMSpec) Parameters(existing interface{}) (params interface{}, err error) { if existing != nil { diff --git a/azure/services/virtualnetworks/spec.go b/azure/services/virtualnetworks/spec.go index 56d3ec53d0e1..e5da99df8f97 100644 --- a/azure/services/virtualnetworks/spec.go +++ b/azure/services/virtualnetworks/spec.go @@ -48,6 +48,11 @@ func (s *VNetSpec) OwnerResourceName() string { return "" } +// CustomHeaders is not implemented for vnets. +func (s *VNetSpec) CustomHeaders() map[string]string { + return nil +} + // Parameters returns the parameters for the vnet. func (s *VNetSpec) Parameters(existing interface{}) (interface{}, error) { if existing != nil { diff --git a/azure/services/vnetpeerings/spec.go b/azure/services/vnetpeerings/spec.go index 2c3fb8f3f899..558fe4a41b70 100644 --- a/azure/services/vnetpeerings/spec.go +++ b/azure/services/vnetpeerings/spec.go @@ -48,6 +48,11 @@ func (s *VnetPeeringSpec) OwnerResourceName() string { return s.SourceVnetName } +// CustomHeaders is not implemented for vnet peerings. +func (s *VnetPeeringSpec) CustomHeaders() map[string]string { + return nil +} + // Parameters returns the parameters for the virtual network peering. func (s *VnetPeeringSpec) Parameters(existing interface{}) (params interface{}, err error) { if existing != nil { diff --git a/azure/types.go b/azure/types.go index a134ae2615ce..d4906393230f 100644 --- a/azure/types.go +++ b/azure/types.go @@ -50,6 +50,60 @@ const ( VirtualMachineScaleSet = "VirtualMachineScaleSet" ) +// AgentPoolSpec contains agent pool specification details. +type AgentPoolSpec struct { + // Name is the name of agent pool. + Name string + + // ResourceGroup is the name of the Azure resource group for the AKS Cluster. + ResourceGroup string + + // Cluster is the name of the AKS cluster. + Cluster string + + // Version defines the desired Kubernetes version. + Version *string + + // SKU defines the Azure VM size for the agent pool VMs. + SKU string + + // Replicas is the number of desired machines. + Replicas int32 + + // OSDiskSizeGB is the OS disk size in GB for every machine in this agent pool. + OSDiskSizeGB int32 + + // VnetSubnetID is the Azure Resource ID for the subnet which should contain nodes. + VnetSubnetID string + + // Mode represents mode of an agent pool. Possible values include: 'System', 'User'. + Mode string + + // Maximum number of nodes for auto-scaling + MaxCount *int32 `json:"maxCount,omitempty"` + + // Minimum number of nodes for auto-scaling + MinCount *int32 `json:"minCount,omitempty"` + + // Node labels - labels for all of the nodes present in node pool + NodeLabels map[string]*string `json:"nodeLabels,omitempty"` + + // NodeTaints specifies the taints for nodes present in this agent pool. 
+ NodeTaints []string `json:"nodeTaints,omitempty"` + + // EnableAutoScaling - Whether to enable auto-scaler + EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"` + + // AvailabilityZones represents the Availability zones for nodes in the AgentPool. + AvailabilityZones []string + + // MaxPods specifies the kubelet --max-pods configuration for the agent pool. + MaxPods *int32 `json:"maxPods,omitempty"` + + // OsDiskType specifies the OS disk type for each node in the pool. Allowed values are 'Ephemeral' and 'Managed'. + OsDiskType *string `json:"osDiskType,omitempty"` +} + // ScaleSetSpec defines the specification for a Scale Set. type ScaleSetSpec struct { Name string @@ -188,180 +242,3 @@ func (vmss VMSS) HasLatestModelApplied(vm VMSSVM) bool { // if the images match, then the VM is of the same model return reflect.DeepEqual(vm.Image, vmss.Image) } - -// ManagedClusterSpec contains properties to create a managed cluster. -type ManagedClusterSpec struct { - // Name is the name of this AKS Cluster. - Name string - - // ResourceGroupName is the name of the Azure resource group for this AKS Cluster. - ResourceGroupName string - - // NodeResourceGroupName is the name of the Azure resource group containing IaaS VMs. - NodeResourceGroupName string - - // VnetSubnetID is the Azure Resource ID for the subnet which should contain nodes. - VnetSubnetID string - - // Location is a string matching one of the canonical Azure region names. Examples: "westus2", "eastus". - Location string - - // Tags is a set of tags to add to this cluster. - Tags map[string]string - - // Version defines the desired Kubernetes version. - Version string - - // LoadBalancerSKU for the managed cluster. Possible values include: 'Standard', 'Basic'. Defaults to Standard. - LoadBalancerSKU string - - // NetworkPlugin used for building Kubernetes network. Possible values include: 'azure', 'kubenet'. Defaults to azure. - NetworkPlugin string - - // NetworkPolicy used for building Kubernetes network. Possible values include: 'calico', 'azure'. Defaults to azure. - NetworkPolicy string - - // SSHPublicKey is a string literal containing an ssh public key. Will autogenerate and discard if not provided. - SSHPublicKey string - - // AgentPools is the list of agent pool specifications in this cluster. - AgentPools []AgentPoolSpec - - // PodCIDR is the CIDR block for IP addresses distributed to pods - PodCIDR string - - // ServiceCIDR is the CIDR block for IP addresses distributed to services - ServiceCIDR string - - // DNSServiceIP is an IP address assigned to the Kubernetes DNS service - DNSServiceIP *string - - // AADProfile is Azure Active Directory configuration to integrate with AKS, for aad authentication. - AADProfile *AADProfile - - // AddonProfiles are the profiles of managed cluster add-on. - AddonProfiles []AddonProfile - - // SKU is the SKU of the AKS to be provisioned. - SKU *SKU - - // LoadBalancerProfile is the profile of the cluster load balancer. - LoadBalancerProfile *LoadBalancerProfile - - // APIServerAccessProfile is the access profile for AKS API server. - APIServerAccessProfile *APIServerAccessProfile -} - -// AADProfile is Azure Active Directory configuration to integrate with AKS, for aad authentication. -type AADProfile struct { - // Managed - Whether to enable managed AAD. - Managed bool - - // EnableAzureRBAC - Whether to enable Azure RBAC for Kubernetes authorization. - EnableAzureRBAC bool - - // AdminGroupObjectIDs - AAD group object IDs that will have admin role of the cluster. 
- AdminGroupObjectIDs []string -} - -// AddonProfile - Profile of managed cluster add-on. -type AddonProfile struct { - Name string - Config map[string]string - Enabled bool -} - -// SKU - AKS SKU. -type SKU struct { - // Tier - Tier of a managed cluster SKU. - Tier string -} - -// LoadBalancerProfile - Profile of the cluster load balancer. -type LoadBalancerProfile struct { - // Load balancer profile must specify at most one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs. - // By default the AKS cluster automatically creates a public IP in the AKS-managed infrastructure resource group and assigns it to the load balancer outbound pool. - // Alternatively, you can assign your own custom public IP or public IP prefix at cluster creation time. - // See https://docs.microsoft.com/en-us/azure/aks/load-balancer-standard#provide-your-own-outbound-public-ips-or-prefixes - - // ManagedOutboundIPs - Desired managed outbound IPs for the cluster load balancer. - ManagedOutboundIPs *int32 - - // OutboundIPPrefixes - Desired outbound IP Prefix resources for the cluster load balancer. - OutboundIPPrefixes []string - - // OutboundIPs - Desired outbound IP resources for the cluster load balancer. - OutboundIPs []string - - // AllocatedOutboundPorts - Desired number of allocated SNAT ports per VM. Allowed values must be in the range of 0 to 64000 (inclusive). The default value is 0 which results in Azure dynamically allocating ports. - AllocatedOutboundPorts *int32 - - // IdleTimeoutInMinutes - Desired outbound flow idle timeout in minutes. Allowed values must be in the range of 4 to 120 (inclusive). The default value is 30 minutes. - IdleTimeoutInMinutes *int32 -} - -// APIServerAccessProfile is the access profile for AKS API server. -type APIServerAccessProfile struct { - // AuthorizedIPRanges - Authorized IP Ranges to kubernetes API server. - AuthorizedIPRanges []string - // EnablePrivateCluster - Whether to create the cluster as a private cluster or not. - EnablePrivateCluster *bool - // PrivateDNSZone - Private dns zone mode for private cluster. - PrivateDNSZone *string - // EnablePrivateClusterPublicFQDN - Whether to create additional public FQDN for private cluster or not. - EnablePrivateClusterPublicFQDN *bool -} - -// AgentPoolSpec contains agent pool specification details. -type AgentPoolSpec struct { - // Name is the name of agent pool. - Name string - - // ResourceGroup is the name of the Azure resource group for the AKS Cluster. - ResourceGroup string - - // Cluster is the name of the AKS cluster. - Cluster string - - // Version defines the desired Kubernetes version. - Version *string - - // SKU defines the Azure VM size for the agent pool VMs. - SKU string - - // Replicas is the number of desired machines. - Replicas int32 - - // OSDiskSizeGB is the OS disk size in GB for every machine in this agent pool. - OSDiskSizeGB int32 - - // VnetSubnetID is the Azure Resource ID for the subnet which should contain nodes. - VnetSubnetID string - - // Mode represents mode of an agent pool. Possible values include: 'System', 'User'. - Mode string - - // Maximum number of nodes for auto-scaling - MaxCount *int32 `json:"maxCount,omitempty"` - - // Minimum number of nodes for auto-scaling - MinCount *int32 `json:"minCount,omitempty"` - - // Node labels - labels for all of the nodes present in node pool - NodeLabels map[string]*string `json:"nodeLabels,omitempty"` - - // NodeTaints specifies the taints for nodes present in this agent pool. 
- NodeTaints []string `json:"nodeTaints,omitempty"` - - // EnableAutoScaling - Whether to enable auto-scaler - EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"` - - // AvailabilityZones represents the Availability zones for nodes in the AgentPool. - AvailabilityZones []string - - // MaxPods specifies the kubelet --max-pods configuration for the agent pool. - MaxPods *int32 `json:"maxPods,omitempty"` - - // OsDiskType specifies the OS disk type for each node in the pool. Allowed values are 'Ephemeral' and 'Managed'. - OsDiskType *string `json:"osDiskType,omitempty"` -}
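The following minimal sketch is not part of the patch; the specGetter, clusterSpec, and put names are hypothetical stand-ins. It illustrates the contract that the new CustomHeaders method adds to azure.ResourceSpecGetter: a generic async client can ask any spec for per-request headers without knowing its concrete type.

package main

import "fmt"

// specGetter mirrors the relevant subset of azure.ResourceSpecGetter.
type specGetter interface {
	ResourceName() string
	ResourceGroupName() string
	CustomHeaders() map[string]string
}

// clusterSpec is a toy stand-in for managedclusters.ManagedClusterSpec.
type clusterSpec struct {
	name          string
	resourceGroup string
	headers       map[string]string
}

func (s clusterSpec) ResourceName() string             { return s.name }
func (s clusterSpec) ResourceGroupName() string        { return s.resourceGroup }
func (s clusterSpec) CustomHeaders() map[string]string { return s.headers }

// put sketches the shape of an async PUT: the generic reconciler hands the
// spec to a client, which decorates the outgoing request with the spec's
// custom headers before calling the Azure API.
func put(spec specGetter) {
	fmt.Printf("PUT %s/%s\n", spec.ResourceGroupName(), spec.ResourceName())
	for k, v := range spec.CustomHeaders() {
		fmt.Printf("  header %s: %s\n", k, v) // a nil map simply yields no headers
	}
}

func main() {
	put(clusterSpec{
		name:          "my-managedcluster",
		resourceGroup: "my-rg",
		headers:       map[string]string{"x-example-feature": "enabled"}, // hypothetical header
	})
}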