From e09bf5aa5e42b812e2954ed58a6dda1be1f9f35e Mon Sep 17 00:00:00 2001 From: Alexander Eldeib Date: Thu, 7 May 2020 14:53:26 -0700 Subject: [PATCH] :hammer_and_pick: cleanup from review feedback Co-authored-by: Vince Prignano Signed-off-by: Alexander Eldeib --- cloud/interfaces.go | 4 +- cloud/scope/machinepool.go | 4 + cloud/scope/managedcontrolplane.go | 3 +- cloud/services/agentpools/agentpools.go | 38 +++++--- cloud/services/agentpools/client.go | 15 ++-- cloud/services/managedclusters/client.go | 19 ++-- .../managedclusters/managedclusters.go | 21 ++--- cloud/services/publicips/publicips.go | 5 +- cloud/services/scalesets/vmss.go | 8 +- ...re.cluster.x-k8s.io_azuremachinepools.yaml | 4 + ...er.x-k8s.io_azuremanagedcontrolplanes.yaml | 6 ++ controllers/azurecluster_reconciler.go | 3 +- exp/api/v1alpha3/azuremachinepool_types.go | 1 + .../azuremanagedcontrolplane_types.go | 6 ++ .../azuremangedcontrolplane_controller.go | 1 + .../azuremangedcontrolplane_reconciler.go | 87 +++++++++++++------ templates/cluster-template-aks.yaml | 16 ++-- templates/flavors/aks/cluster-template.yaml | 16 ++-- 18 files changed, 163 insertions(+), 94 deletions(-) diff --git a/cloud/interfaces.go b/cloud/interfaces.go index 4b0bc45bdcc..28954037705 100644 --- a/cloud/interfaces.go +++ b/cloud/interfaces.go @@ -36,7 +36,9 @@ type GetterService interface { Delete(ctx context.Context, spec interface{}) error } +// CredentialGetter is a GetterService which knows how to retrieve credentials for an Azure +// resource in a resource group. type CredentialGetter interface { GetterService - GetCredentials(ctx context.Context, spec interface{}) ([]byte, error) + GetCredentials(ctx context.Context, group string, cluster string) ([]byte, error) } diff --git a/cloud/scope/machinepool.go b/cloud/scope/machinepool.go index c9b4621a5ae..dbe469b2494 100644 --- a/cloud/scope/machinepool.go +++ b/cloud/scope/machinepool.go @@ -86,6 +86,10 @@ func NewMachinePoolScope(params MachinePoolScopeParams) (*MachinePoolScope, erro params.Logger = klogr.New() } + if err := params.AzureClients.setCredentials(params.AzureCluster.Spec.SubscriptionID); err != nil { + return nil, errors.Wrap(err, "failed to create Azure session") + } + helper, err := patch.NewHelper(params.AzureMachinePool, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") diff --git a/cloud/scope/managedcontrolplane.go b/cloud/scope/managedcontrolplane.go index 68b2431d0a6..9c80e636e2e 100644 --- a/cloud/scope/managedcontrolplane.go +++ b/cloud/scope/managedcontrolplane.go @@ -58,8 +58,7 @@ func NewManagedControlPlaneScope(params ManagedControlPlaneScopeParams) (*Manage params.Logger = klogr.New() } - err := params.AzureClients.setCredentials(params.ControlPlane.Spec.SubscriptionID) - if err != nil { + if err := params.AzureClients.setCredentials(params.ControlPlane.Spec.SubscriptionID); err != nil { return nil, errors.Wrap(err, "failed to create Azure session") } diff --git a/cloud/services/agentpools/agentpools.go b/cloud/services/agentpools/agentpools.go index c446da8a123..fbb19e3a80c 100644 --- a/cloud/services/agentpools/agentpools.go +++ b/cloud/services/agentpools/agentpools.go @@ -65,12 +65,26 @@ func (s *Service) Reconcile(ctx context.Context, spec interface{}) error { } existingSpec, err := s.Get(ctx, spec) + if err != nil && !azure.ResourceNotFound(err) { + return errors.Wrapf(err, "failed to get existing agent pool") + } existingPool, ok := existingSpec.(containerservice.AgentPool) if !ok { return errors.New("expected agent 
pool specification") } - if err == nil { + // For updates, we want to pass whatever we find in the existing + // cluster, normalized to reflect the input we originally provided. + // AKS will populate defaults and read-only values, which we want + // to strip/clean to match what we expect. + isCreate := azure.ResourceNotFound(err) + if isCreate { + err = s.Client.CreateOrUpdate(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name, profile) + if err != nil { + return fmt.Errorf("failed to create or update agent pool, %#+v", err) + } + } else { + // Normalize individual agent pools to diff in case we need to update existingProfile := containerservice.AgentPool{ ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ VMSize: existingPool.ManagedClusterAgentPoolProfileProperties.VMSize, @@ -81,20 +95,16 @@ func (s *Service) Reconcile(ctx context.Context, spec interface{}) error { }, } + // Diff and check if we require an update diff := cmp.Diff(profile, existingProfile) if diff != "" { - klog.V(2).Infof("update required (+new -old):\n%s", diff) + klog.V(2).Infof("Update required (+new -old):\n%s", diff) err = s.Client.CreateOrUpdate(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name, profile) if err != nil { - return fmt.Errorf("failed to create or update agent pool, %#+v", err) + return fmt.Errorf("failed to create or update agent pool, %#+v", err.Error()) } } else { - klog.V(2).Infof("normalized and desired managed cluster matched, no update needed") - } - } else if azure.ResourceNotFound(err) { - err = s.Client.CreateOrUpdate(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name, profile) - if err != nil { - return fmt.Errorf("failed to create or update agent pool, %#+v", err) + klog.V(2).Infof("Normalized and desired agent pool matched, no update needed") } } @@ -110,14 +120,14 @@ func (s *Service) Delete(ctx context.Context, spec interface{}) error { klog.V(2).Infof("deleting agent pool %s ", agentPoolSpec.Name) err := s.Client.Delete(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name) - if err != nil && azure.ResourceNotFound(err) { - // already deleted - return nil - } if err != nil { + if azure.ResourceNotFound(err) { + // already deleted + return nil + } return errors.Wrapf(err, "failed to delete agent pool %s in resource group %s", agentPoolSpec.Name, agentPoolSpec.ResourceGroup) } - klog.V(2).Infof("successfully deleted agent pool %s ", agentPoolSpec.Name) + klog.V(2).Infof("Successfully deleted agent pool %s ", agentPoolSpec.Name) return nil } diff --git a/cloud/services/agentpools/client.go b/cloud/services/agentpools/client.go index e1f3a56b490..b62cc7ac26f 100644 --- a/cloud/services/agentpools/client.go +++ b/cloud/services/agentpools/client.go @@ -21,6 +21,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" "github.com/Azure/go-autorest/autorest" + "github.com/pkg/errors" azure "sigs.k8s.io/cluster-api-provider-azure/cloud" ) @@ -61,11 +62,10 @@ func (ac *AzureClient) Get(ctx context.Context, resourceGroupName, cluster, name func (ac *AzureClient) CreateOrUpdate(ctx context.Context, resourceGroupName, cluster, name string, properties containerservice.AgentPool) error { future, err := ac.agentpools.CreateOrUpdate(ctx, resourceGroupName, cluster, name, properties) if err != nil { - return err + return errors.Wrap(err, "failed to begin operation") } - err = 
future.WaitForCompletionRef(ctx, ac.agentpools.Client) - if err != nil { - return err + if err := future.WaitForCompletionRef(ctx, ac.agentpools.Client); err != nil { + return errors.Wrap(err, "failed to end operation") } _, err = future.Result(ac.agentpools) return err @@ -75,11 +75,10 @@ func (ac *AzureClient) CreateOrUpdate(ctx context.Context, resourceGroupName, cl func (ac *AzureClient) Delete(ctx context.Context, resourceGroupName, cluster, name string) error { future, err := ac.agentpools.Delete(ctx, resourceGroupName, cluster, name) if err != nil { - return err + return errors.Wrap(err, "failed to begin operation") } - err = future.WaitForCompletionRef(ctx, ac.agentpools.Client) - if err != nil { - return err + if err := future.WaitForCompletionRef(ctx, ac.agentpools.Client); err != nil { + return errors.Wrap(err, "failed to end operation") } _, err = future.Result(ac.agentpools) return err diff --git a/cloud/services/managedclusters/client.go b/cloud/services/managedclusters/client.go index 809c528882e..43891dd9627 100644 --- a/cloud/services/managedclusters/client.go +++ b/cloud/services/managedclusters/client.go @@ -42,8 +42,9 @@ var _ Client = &AzureClient{} // NewClient creates a new VM client from subscription ID. func NewClient(subscriptionID string, authorizer autorest.Authorizer) *AzureClient { - c := newManagedClustersClient(subscriptionID, authorizer) - return &AzureClient{c} + return &AzureClient{ + managedclusters: newManagedClustersClient(subscriptionID, authorizer), + } } // newManagedClustersClient creates a new managed clusters client from subscription ID. @@ -77,11 +78,10 @@ func (ac *AzureClient) GetCredentials(ctx context.Context, resourceGroupName, na func (ac *AzureClient) CreateOrUpdate(ctx context.Context, resourceGroupName, name string, cluster containerservice.ManagedCluster) error { future, err := ac.managedclusters.CreateOrUpdate(ctx, resourceGroupName, name, cluster) if err != nil { - return err + return errors.Wrapf(err, "failed to begin operation") } - err = future.WaitForCompletionRef(ctx, ac.managedclusters.Client) - if err != nil { - return err + if err := future.WaitForCompletionRef(ctx, ac.managedclusters.Client); err != nil { + return errors.Wrapf(err, "failed to end operation") } _, err = future.Result(ac.managedclusters) return err @@ -91,11 +91,10 @@ func (ac *AzureClient) CreateOrUpdate(ctx context.Context, resourceGroupName, na func (ac *AzureClient) Delete(ctx context.Context, resourceGroupName, name string) error { future, err := ac.managedclusters.Delete(ctx, resourceGroupName, name) if err != nil { - return err + return errors.Wrapf(err, "failed to begin operation") } - err = future.WaitForCompletionRef(ctx, ac.managedclusters.Client) - if err != nil { - return err + if err := future.WaitForCompletionRef(ctx, ac.managedclusters.Client); err != nil { + return errors.Wrapf(err, "failed to end operation") } _, err = future.Result(ac.managedclusters) return err diff --git a/cloud/services/managedclusters/managedclusters.go b/cloud/services/managedclusters/managedclusters.go index 4f21a620d3b..70d6190e0de 100644 --- a/cloud/services/managedclusters/managedclusters.go +++ b/cloud/services/managedclusters/managedclusters.go @@ -45,7 +45,6 @@ type Spec struct { Location string // Tags is a set of tags to add to this cluster. - // +optional Tags map[string]string // Version defines the desired Kubernetes version. 
@@ -90,12 +89,8 @@ func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error } // Get fetches a managed cluster kubeconfig from Azure. -func (s *Service) GetCredentials(ctx context.Context, spec interface{}) ([]byte, error) { - managedClusterSpec, ok := spec.(*Spec) - if !ok { - return nil, errors.New("expected managed cluster specification") - } - return s.Client.GetCredentials(ctx, managedClusterSpec.ResourceGroup, managedClusterSpec.Name) +func (s *Service) GetCredentials(ctx context.Context, group, name string) ([]byte, error) { + return s.Client.GetCredentials(ctx, group, name) } // Reconcile idempotently creates or updates a managed cluster, if possible. @@ -185,7 +180,7 @@ func (s *Service) Reconcile(ctx context.Context, spec interface{}) error { err := s.Client.CreateOrUpdate(ctx, managedClusterSpec.ResourceGroup, managedClusterSpec.Name, properties) if err != nil { - return fmt.Errorf("failed to create or update managed cluster, %v", err) + return fmt.Errorf("failed to create or update managed cluster, %#+v", err) } return nil @@ -198,13 +193,13 @@ func (s *Service) Delete(ctx context.Context, spec interface{}) error { return errors.New("expected managed cluster specification") } - klog.V(2).Infof("deleting managed cluster %s ", managedClusterSpec.Name) + klog.V(2).Infof("Deleting managed cluster %s ", managedClusterSpec.Name) err := s.Client.Delete(ctx, managedClusterSpec.ResourceGroup, managedClusterSpec.Name) - if err != nil && azure.ResourceNotFound(err) { - // already deleted - return nil - } if err != nil { + if azure.ResourceNotFound(err) { + // already deleted + return nil + } return errors.Wrapf(err, "failed to delete managed cluster %s in resource group %s", managedClusterSpec.Name, managedClusterSpec.ResourceGroup) } diff --git a/cloud/services/publicips/publicips.go b/cloud/services/publicips/publicips.go index c206965ab94..dbc135a6e5c 100644 --- a/cloud/services/publicips/publicips.go +++ b/cloud/services/publicips/publicips.go @@ -29,7 +29,8 @@ import ( // Spec specification for public ip type Spec struct { - Name string + Name string + DNSName string } // Get provides information about a public ip. 
@@ -70,7 +71,7 @@ func (s *Service) Reconcile(ctx context.Context, spec interface{}) error {
 					PublicIPAllocationMethod: network.Static,
 					DNSSettings: &network.PublicIPAddressDNSSettings{
 						DomainNameLabel: to.StringPtr(strings.ToLower(ipName)),
-						Fqdn:            to.StringPtr(s.Scope.Network().APIServerIP.DNSName),
+						Fqdn:            &publicIPSpec.DNSName,
 					},
 				},
 			},
diff --git a/cloud/services/scalesets/vmss.go b/cloud/services/scalesets/vmss.go
index 93046c8cae9..369398b286f 100644
--- a/cloud/services/scalesets/vmss.go
+++ b/cloud/services/scalesets/vmss.go
@@ -170,11 +170,11 @@ func (s *Service) Delete(ctx context.Context, spec interface{}) error {
 	}
 	klog.V(2).Infof("deleting VMSS %s ", vmssSpec.Name)
 	err := s.Client.Delete(ctx, vmssSpec.ResourceGroup, vmssSpec.Name)
-	if err != nil && azure.ResourceNotFound(err) {
-		// already deleted
-		return nil
-	}
 	if err != nil {
+		if azure.ResourceNotFound(err) {
+			// already deleted
+			return nil
+		}
 		return errors.Wrapf(err, "failed to delete VMSS %s in resource group %s", vmssSpec.Name, vmssSpec.ResourceGroup)
 	}
 
diff --git a/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremachinepools.yaml b/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremachinepools.yaml
index b273c43338c..c0580a48c65 100644
--- a/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremachinepools.yaml
+++ b/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremachinepools.yaml
@@ -10,9 +10,13 @@ metadata:
 spec:
   group: exp.infrastructure.cluster.x-k8s.io
   names:
+    categories:
+    - cluster-api
     kind: AzureMachinePool
     listKind: AzureMachinePoolList
     plural: azuremachinepools
+    shortNames:
+    - amp
     singular: azuremachinepool
   scope: Namespaced
   versions:
diff --git a/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml b/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml
index 6e50279736d..4340e67f578 100644
--- a/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml
+++ b/config/crd/bases/exp.infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml
@@ -127,6 +127,12 @@ spec:
             description: AzureManagedControlPlaneStatus defines the observed state
               of AzureManagedControlPlane
             properties:
+              initialized:
+                description: Initialized is true when the control plane is available
+                  for initial contact. This may occur before the control plane is
+                  fully ready. In the AzureManagedControlPlane implementation, these
+                  are identical.
+                type: boolean
               ready:
                 description: Ready is true when the provider resource is ready.
                type: boolean
diff --git a/controllers/azurecluster_reconciler.go b/controllers/azurecluster_reconciler.go
index 915a0a8eb9c..14897af35b2 100644
--- a/controllers/azurecluster_reconciler.go
+++ b/controllers/azurecluster_reconciler.go
@@ -205,7 +205,8 @@ func (r *azureClusterReconciler) Reconcile() error {
 	}
 
 	publicIPSpec := &publicips.Spec{
-		Name: r.scope.Network().APIServerIP.Name,
+		Name:    r.scope.Network().APIServerIP.Name,
+		DNSName: r.scope.Network().APIServerIP.DNSName,
 	}
 	if err := r.publicIPSvc.Reconcile(r.scope.Context, publicIPSpec); err != nil {
 		return errors.Wrapf(err, "failed to reconcile control plane public ip for cluster %s", r.scope.Name())
diff --git a/exp/api/v1alpha3/azuremachinepool_types.go b/exp/api/v1alpha3/azuremachinepool_types.go
index d0188a7ad33..a1a55708313 100644
--- a/exp/api/v1alpha3/azuremachinepool_types.go
+++ b/exp/api/v1alpha3/azuremachinepool_types.go
@@ -121,6 +121,7 @@ type (
 
 	// +kubebuilder:object:root=true
 	// +kubebuilder:subresource:status
+	// +kubebuilder:resource:path=azuremachinepools,scope=Namespaced,categories=cluster-api,shortName=amp
 	// +kubebuilder:printcolumn:name="Replicas",type="string",JSONPath=".status.replicas",description="AzureMachinePool replicas count"
 	// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="AzureMachinePool replicas count"
 	// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.provisioningState",description="Azure VMSS provisioning state"
diff --git a/exp/api/v1alpha3/azuremanagedcontrolplane_types.go b/exp/api/v1alpha3/azuremanagedcontrolplane_types.go
index 5835ec0cd38..ccc7558e8d1 100644
--- a/exp/api/v1alpha3/azuremanagedcontrolplane_types.go
+++ b/exp/api/v1alpha3/azuremanagedcontrolplane_types.go
@@ -72,6 +72,12 @@ type AzureManagedControlPlaneStatus struct {
 	// Ready is true when the provider resource is ready.
 	// +optional
 	Ready bool `json:"ready,omitempty"`
+
+	// Initialized is true when the control plane is available for initial contact.
+	// This may occur before the control plane is fully ready.
+	// In the AzureManagedControlPlane implementation, these are identical.
+ // +optional + Initialized bool `json:"initialized,omitempty"` } // +kubebuilder:object:root=true diff --git a/exp/controllers/azuremangedcontrolplane_controller.go b/exp/controllers/azuremangedcontrolplane_controller.go index a6da60bcba9..167837631a4 100644 --- a/exp/controllers/azuremangedcontrolplane_controller.go +++ b/exp/controllers/azuremangedcontrolplane_controller.go @@ -151,6 +151,7 @@ func (r *AzureManagedControlPlaneReconciler) reconcileNormal(ctx context.Context // No errors, so mark us ready so the Cluster API Cluster Controller can pull it scope.ControlPlane.Status.Ready = true + scope.ControlPlane.Status.Initialized = true return reconcile.Result{}, nil } diff --git a/exp/controllers/azuremangedcontrolplane_reconciler.go b/exp/controllers/azuremangedcontrolplane_reconciler.go index 111498aafdc..e198fc6bb40 100644 --- a/exp/controllers/azuremangedcontrolplane_reconciler.go +++ b/exp/controllers/azuremangedcontrolplane_reconciler.go @@ -27,6 +27,7 @@ import ( azure "sigs.k8s.io/cluster-api-provider-azure/cloud" "sigs.k8s.io/cluster-api-provider-azure/cloud/scope" "sigs.k8s.io/cluster-api-provider-azure/cloud/services/managedclusters" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" "sigs.k8s.io/cluster-api/util/secret" "sigs.k8s.io/controller-runtime/pkg/client" @@ -49,7 +50,6 @@ func newAzureManagedControlPlaneReconciler(scope *scope.ManagedControlPlaneScope // Reconcile reconciles all the services in pre determined order func (r *azureManagedControlPlaneReconciler) Reconcile(ctx context.Context, scope *scope.ManagedControlPlaneScope) error { - scope.Logger.Info("reconciling cluster") managedClusterSpec := &managedclusters.Spec{ Name: scope.ControlPlane.Name, ResourceGroup: scope.ControlPlane.Spec.ResourceGroup, @@ -62,6 +62,46 @@ func (r *azureManagedControlPlaneReconciler) Reconcile(ctx context.Context, scop SSHPublicKey: scope.ControlPlane.Spec.SSHPublicKey, } + scope.Logger.V(2).Info("Reconciling managed cluster") + if err := r.reconcileManagedCluster(ctx, scope, managedClusterSpec); err != nil { + return errors.Wrapf(err, "failed to reconcile managed cluster") + } + + scope.Logger.V(2).Info("Reconciling endpoint") + if err := r.reconcileEndpoint(ctx, scope, managedClusterSpec); err != nil { + return errors.Wrapf(err, "failed to reconcile control plane endpoint") + } + + scope.Logger.V(2).Info("Reconciling kubeconfig") + if err := r.reconcileKubeconfig(ctx, scope, managedClusterSpec); err != nil { + return errors.Wrapf(err, "failed to reconcile kubeconfig secret") + } + + return nil +} + +// Delete reconciles all the services in pre determined order +func (r *azureManagedControlPlaneReconciler) Delete(ctx context.Context, scope *scope.ManagedControlPlaneScope) error { + managedClusterSpec := &managedclusters.Spec{ + Name: scope.ControlPlane.Name, + ResourceGroup: scope.ControlPlane.Spec.ResourceGroup, + Location: scope.ControlPlane.Spec.Location, + Tags: scope.ControlPlane.Spec.AdditionalTags, + Version: scope.ControlPlane.Spec.Version, + LoadBalancerSKU: scope.ControlPlane.Spec.LoadBalancerSKU, + NetworkPlugin: scope.ControlPlane.Spec.NetworkPlugin, + NetworkPolicy: scope.ControlPlane.Spec.NetworkPolicy, + SSHPublicKey: scope.ControlPlane.Spec.SSHPublicKey, + } + + if err := r.managedClustersSvc.Delete(ctx, managedClusterSpec); err != nil { + return errors.Wrapf(err, "failed to delete managed cluster %s", scope.ControlPlane.Name) + } + + return nil +} + +func (r *azureManagedControlPlaneReconciler) 
reconcileManagedCluster(ctx context.Context, scope *scope.ManagedControlPlaneScope, managedClusterSpec *managedclusters.Spec) error {
 	if net := scope.Cluster.Spec.ClusterNetwork; net != nil {
 		if net.Services != nil {
 			// A user may provide zero or one CIDR blocks. If they provide an empty array,
@@ -86,13 +126,16 @@ func (r *azureManagedControlPlaneReconciler) Reconcile(ctx context.Context, scop
 	}
 
 	_, err := r.managedClustersSvc.Get(ctx, managedClusterSpec)
+	// Transient or other failure not due to 404
 	if err != nil && !azure.ResourceNotFound(err) {
-		return errors.Wrapf(err, "failed to reconcile managed cluster %s", scope.ControlPlane.Name)
+		return errors.Wrapf(err, "failed to fetch existing managed cluster")
 	}
 
+	// We are creating this cluster for the first time.
+	// Configure the default pool, rest will be handled by machinepool controller
+	// We do this here because AKS will only let us mutate agent pools via managed
+	// clusters API at create time, not update.
 	if azure.ResourceNotFound(err) {
-		// We are creating this cluster for the first time.
-		// Configure the default pool, rest will be handled by machinepool controller
 		defaultPoolSpec := managedclusters.PoolSpec{
 			Name:         scope.InfraMachinePool.Name,
 			SKU:          scope.InfraMachinePool.Spec.SKU,
@@ -112,11 +155,14 @@ func (r *azureManagedControlPlaneReconciler) Reconcile(ctx context.Context, scop
 		managedClusterSpec.AgentPools = []managedclusters.PoolSpec{defaultPoolSpec}
 	}
 
-	// Send to Azure for updates
+	// Send to Azure for create/update.
 	if err := r.managedClustersSvc.Reconcile(ctx, managedClusterSpec); err != nil {
 		return errors.Wrapf(err, "failed to reconcile managed cluster %s", scope.ControlPlane.Name)
 	}
+	return nil
+}
 
+func (r *azureManagedControlPlaneReconciler) reconcileEndpoint(ctx context.Context, scope *scope.ManagedControlPlaneScope, managedClusterSpec *managedclusters.Spec) error {
 	// Fetch newly updated cluster
 	managedClusterResult, err := r.managedClustersSvc.Get(ctx, managedClusterSpec)
 	if err != nil {
@@ -139,45 +185,36 @@ func (r *azureManagedControlPlaneReconciler) Reconcile(ctx context.Context, scop
 		return errors.Wrapf(err, "failed to set control plane endpoint")
 	}
 
+	return nil
+}
+
+func (r *azureManagedControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, scope *scope.ManagedControlPlaneScope, managedClusterSpec *managedclusters.Spec) error {
 	// Always fetch credentials in case of rotation
-	data, err := r.managedClustersSvc.GetCredentials(ctx, managedClusterSpec)
+	data, err := r.managedClustersSvc.GetCredentials(ctx, managedClusterSpec.ResourceGroup, managedClusterSpec.Name)
 	if err != nil {
 		return errors.Wrapf(err, "failed to get credentials for managed cluster")
 	}
 
 	// Construct and store secret
-	kubeconfig := makeKubeconfig(scope.Cluster)
-	_, err = controllerutil.CreateOrUpdate(ctx, r.kubeclient, kubeconfig, func() error {
+	kubeconfig := makeKubeconfig(scope.Cluster, scope.ControlPlane)
+	if _, err := controllerutil.CreateOrUpdate(ctx, r.kubeclient, kubeconfig, func() error {
 		kubeconfig.Data = map[string][]byte{
 			secret.KubeconfigDataName: data,
 		}
 		return nil
-	})
-
-	return err
-}
-
-// Delete reconciles all the services in pre determined order
-func (r *azureManagedControlPlaneReconciler) Delete(ctx context.Context, scope *scope.ManagedControlPlaneScope) error {
-	if err := r.managedClustersSvc.Delete(ctx, nil); err != nil {
-		return errors.Wrapf(err, "failed to delete managed cluster %s", scope.ControlPlane.Name)
+	}); err != nil {
+		return errors.Wrapf(err, "failed to reconcile kubeconfig secret for 
cluster") } - return nil } -func makeKubeconfig(cluster *clusterv1.Cluster) *corev1.Secret { +func makeKubeconfig(cluster *clusterv1.Cluster, controlPlane *infrav1exp.AzureManagedControlPlane) *corev1.Secret { return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secret.Name(cluster.Name, secret.Kubeconfig), Namespace: cluster.Namespace, OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: clusterv1.GroupVersion.String(), - Kind: "Cluster", - Name: cluster.Name, - UID: cluster.UID, - }, + *metav1.NewControllerRef(controlPlane, infrav1exp.GroupVersion.WithKind("AzureManagedControlPlane")), }, }, } diff --git a/templates/cluster-template-aks.yaml b/templates/cluster-template-aks.yaml index e43f32ebfc6..401105e80ec 100644 --- a/templates/cluster-template-aks.yaml +++ b/templates/cluster-template-aks.yaml @@ -23,7 +23,7 @@ metadata: name: ${CLUSTER_NAME}-control-plane spec: defaultPoolRef: - name: defaultpool + name: agentpool0 location: ${AZURE_LOCATION} resourceGroup: ${AZURE_RESOURCE_GROUP} sshPublicKey: ${AZURE_SSH_PUBLIC_KEY} @@ -34,11 +34,13 @@ apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 kind: AzureManagedCluster metadata: name: ${CLUSTER_NAME} +spec: + subscriptionID: ${AZURE_SUBSCRIPTION_ID} --- apiVersion: exp.cluster.x-k8s.io/v1alpha3 kind: MachinePool metadata: - name: defaultpool + name: agentpool0 spec: clusterName: ${CLUSTER_NAME} replicas: ${WORKER_MACHINE_COUNT} @@ -51,14 +53,14 @@ spec: infrastructureRef: apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 kind: AzureManagedMachinePool - name: defaultpool + name: agentpool0 namespace: default version: ${KUBERNETES_VERSION} --- apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 kind: AzureManagedMachinePool metadata: - name: defaultpool + name: agentpool0 spec: osDiskSizeGB: 512 sku: ${AZURE_NODE_MACHINE_TYPE} @@ -66,7 +68,7 @@ spec: apiVersion: exp.cluster.x-k8s.io/v1alpha3 kind: MachinePool metadata: - name: agentpool2 + name: agentpool1 spec: clusterName: ${CLUSTER_NAME} replicas: ${WORKER_MACHINE_COUNT} @@ -79,14 +81,14 @@ spec: infrastructureRef: apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 kind: AzureManagedMachinePool - name: agentpool2 + name: agentpool1 namespace: default version: ${KUBERNETES_VERSION} --- apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 kind: AzureManagedMachinePool metadata: - name: agentpool2 + name: agentpool1 spec: osDiskSizeGB: 1024 sku: ${AZURE_NODE_MACHINE_TYPE} diff --git a/templates/flavors/aks/cluster-template.yaml b/templates/flavors/aks/cluster-template.yaml index 852db3fcbf6..4aa523e9d6f 100644 --- a/templates/flavors/aks/cluster-template.yaml +++ b/templates/flavors/aks/cluster-template.yaml @@ -32,7 +32,7 @@ spec: resourceGroup: "${AZURE_RESOURCE_GROUP}" location: "${AZURE_LOCATION}" defaultPoolRef: - name: "defaultpool" + name: "agentpool0" sshPublicKey: "${AZURE_SSH_PUBLIC_KEY}" version: "${KUBERNETES_VERSION}" --- @@ -43,6 +43,8 @@ apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 kind: AzureManagedCluster metadata: name: ${CLUSTER_NAME} +spec: + subscriptionID: ${AZURE_SUBSCRIPTION_ID} --- # We provision a default machine pool with no boostrap data (AKS will provide it). 
# We specify an AzureManagedMachinePool as the infrastructure machine it, which @@ -50,7 +52,7 @@ metadata: apiVersion: exp.cluster.x-k8s.io/v1alpha3 kind: MachinePool metadata: - name: "defaultpool" + name: "agentpool0" spec: clusterName: "${CLUSTER_NAME}" replicas: ${WORKER_MACHINE_COUNT} @@ -63,7 +65,7 @@ spec: infrastructureRef: apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 kind: AzureManagedMachinePool - name: "defaultpool" + name: "agentpool0" namespace: default version: "${KUBERNETES_VERSION}" --- @@ -72,7 +74,7 @@ spec: apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 kind: AzureManagedMachinePool metadata: - name: "defaultpool" + name: "agentpool0" spec: osDiskSizeGB: 512 sku: "${AZURE_NODE_MACHINE_TYPE}" @@ -81,7 +83,7 @@ spec: apiVersion: exp.cluster.x-k8s.io/v1alpha3 kind: MachinePool metadata: - name: "agentpool2" + name: "agentpool1" spec: clusterName: "${CLUSTER_NAME}" replicas: ${WORKER_MACHINE_COUNT} @@ -94,7 +96,7 @@ spec: infrastructureRef: apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 kind: AzureManagedMachinePool - name: "agentpool2" + name: "agentpool1" namespace: default version: "${KUBERNETES_VERSION}" --- @@ -102,7 +104,7 @@ spec: apiVersion: exp.infrastructure.cluster.x-k8s.io/v1alpha3 kind: AzureManagedMachinePool metadata: - name: "agentpool2" + name: "agentpool1" spec: osDiskSizeGB: 1024 sku: "${AZURE_NODE_MACHINE_TYPE}"