diff --git a/exp/controllers/azuremachinepool_controller.go b/exp/controllers/azuremachinepool_controller.go index 8968d56fe8a..55e190ccd9d 100644 --- a/exp/controllers/azuremachinepool_controller.go +++ b/exp/controllers/azuremachinepool_controller.go @@ -45,12 +45,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - "sigs.k8s.io/cluster-api-provider-azure/cloud/services/scalesets" capzcntr "sigs.k8s.io/cluster-api-provider-azure/controllers" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" azure "sigs.k8s.io/cluster-api-provider-azure/cloud" "sigs.k8s.io/cluster-api-provider-azure/cloud/scope" + "sigs.k8s.io/cluster-api-provider-azure/cloud/services/scalesets" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" ) @@ -372,7 +372,7 @@ func (r *AzureMachinePoolReconciler) reconcileTags(machinePoolScope *scope.Machi vmssSpec := &scalesets.Spec{ Name: machinePoolScope.Name(), } - svc := scalesets.NewService(clusterScope, machinePoolScope) + svc := scalesets.NewService(machinePoolScope.AzureClients.Authorizer, machinePoolScope.AzureClients.SubscriptionID) vm, err := svc.Client.Get(clusterScope.Context, clusterScope.ResourceGroup(), machinePoolScope.Name()) if err != nil { return errors.Wrapf(err, "failed to query AzureMachine VMSS") @@ -463,7 +463,7 @@ func newAzureMachinePoolService(machinePoolScope *scope.MachinePoolScope, cluste return &azureMachinePoolService{ machinePoolScope: machinePoolScope, clusterScope: clusterScope, - virtualMachinesScaleSetSvc: scalesets.NewService(clusterScope, machinePoolScope), + virtualMachinesScaleSetSvc: scalesets.NewService(machinePoolScope.AzureClients.Authorizer, machinePoolScope.AzureClients.SubscriptionID), } } @@ -490,13 +490,19 @@ func (s *azureMachinePoolService) CreateOrUpdate() (*infrav1exp.VMSS, error) { } vmssSpec := &scalesets.Spec{ - Name: s.machinePoolScope.Name(), - Sku: ampSpec.Template.VMSize, - Capacity: replicas, - SSHKeyData: string(decoded), - Image: image, - OSDisk: ampSpec.Template.OSDisk, - CustomData: bootstrapData, + Name: s.machinePoolScope.Name(), + ResourceGroup: s.clusterScope.ResourceGroup(), + Location: s.clusterScope.Location(), + ClusterName: s.clusterScope.Name(), + MachinePoolName: s.machinePoolScope.Name(), + Sku: ampSpec.Template.VMSize, + Capacity: replicas, + SSHKeyData: string(decoded), + Image: image, + OSDisk: ampSpec.Template.OSDisk, + CustomData: bootstrapData, + AdditionalTags: s.machinePoolScope.AdditionalTags(), + SubnetID: s.clusterScope.AzureCluster.Spec.NetworkSpec.Subnets[0].ID, } err = s.virtualMachinesScaleSetSvc.Reconcile(context.TODO(), vmssSpec) @@ -567,15 +573,14 @@ func (s *azureMachinePoolService) Get() (*infrav1exp.VMSS, error) { func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*capiv1exp.MachinePool, error) { for _, ref := range obj.OwnerReferences { if ref.Kind == "MachinePool" && ref.APIVersion == capiv1exp.GroupVersion.String() { - - return getMachineByName(ctx, c, obj.Namespace, ref.Name) + return getMachinePoolByName(ctx, c, obj.Namespace, ref.Name) } } return nil, nil } // getMachinePoolByName finds and return a Machine object using the specified params. 
-func getMachineByName(ctx context.Context, c client.Client, namespace, name string) (*capiv1exp.MachinePool, error) { +func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*capiv1exp.MachinePool, error) { m := &capiv1exp.MachinePool{} key := client.ObjectKey{Name: name, Namespace: namespace} if err := c.Get(ctx, key, m); err != nil { diff --git a/exp/controllers/azuremanagedcluster_controller.go b/exp/controllers/azuremanagedcluster_controller.go new file mode 100644 index 00000000000..06129005db4 --- /dev/null +++ b/exp/controllers/azuremanagedcluster_controller.go @@ -0,0 +1,111 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" + + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/patch" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// AzureManagedClusterReconciler reconciles a AzureManagedCluster object +type AzureManagedClusterReconciler struct { + client.Client + Log logr.Logger + Recorder record.EventRecorder +} + +func (r *AzureManagedClusterReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { + return ctrl.NewControllerManagedBy(mgr). + WithOptions(options). + For(&infrav1exp.AzureManagedCluster{}). + Complete(r) +} + +// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedclusters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete + +func (r *AzureManagedClusterReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) { + ctx := context.TODO() + log := r.Log.WithValues("namespace", req.Namespace, "aksCluster", req.Name) + + // Fetch the AzureManagedCluster instance + aksCluster := &infrav1exp.AzureManagedCluster{} + err := r.Get(ctx, req.NamespacedName, aksCluster) + if err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + // Fetch the Cluster. 
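As an aside, the getOwnerMachinePool and getMachinePoolByName helpers above are easy to exercise with controller-runtime's fake client. The following is a minimal test sketch, not part of this diff (the test name and fixtures are invented), assuming it lives alongside the controller in package controllers:

package controllers

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	capiv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// TestGetOwnerMachinePool resolves a MachinePool through an owner reference,
// the same lookup the AzureMachinePool and AzureManagedMachinePool controllers perform.
func TestGetOwnerMachinePool(t *testing.T) {
	scheme := runtime.NewScheme()
	if err := capiv1exp.AddToScheme(scheme); err != nil {
		t.Fatal(err)
	}

	ownerPool := &capiv1exp.MachinePool{
		ObjectMeta: metav1.ObjectMeta{Name: "pool0", Namespace: "default"},
	}
	c := fake.NewFakeClientWithScheme(scheme, ownerPool)

	// An infra object owned by the MachinePool above.
	meta := metav1.ObjectMeta{
		Namespace: "default",
		OwnerReferences: []metav1.OwnerReference{{
			APIVersion: capiv1exp.GroupVersion.String(),
			Kind:       "MachinePool",
			Name:       "pool0",
		}},
	}

	got, err := getOwnerMachinePool(context.TODO(), c, meta)
	if err != nil || got == nil || got.Name != "pool0" {
		t.Fatalf("expected to resolve MachinePool pool0, got %+v (err: %v)", got, err)
	}
}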
+ cluster, err := util.GetOwnerCluster(ctx, r.Client, aksCluster.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if cluster == nil { + log.Info("Cluster Controller has not yet set OwnerRef") + return reconcile.Result{}, nil + } + + controlPlane := &infrav1exp.AzureManagedControlPlane{} + controlPlaneRef := types.NamespacedName{ + Name: cluster.Spec.ControlPlaneRef.Name, + Namespace: cluster.Namespace, + } + + log = log.WithValues("cluster", cluster.Name) + + if err := r.Get(ctx, controlPlaneRef, controlPlane); err != nil { + return reconcile.Result{}, errors.Wrap(err, "failed to get control plane ref") + } + + log = log.WithValues("controlPlane", controlPlaneRef.Name) + + patchhelper, err := patch.NewHelper(aksCluster, r.Client) + if err != nil { + return reconcile.Result{}, errors.Wrap(err, "failed to init patch helper") + } + + // Match whatever the control plane says. We should also enqueue + // requests from control plane to infra cluster to keep this accurate + aksCluster.Status.Ready = controlPlane.Status.Ready + aksCluster.Spec.ControlPlaneEndpoint = controlPlane.Spec.ControlPlaneEndpoint + + if err := patchhelper.Patch(ctx, aksCluster); err != nil { + return reconcile.Result{}, err + } + + log.Info("Successfully reconciled") + + return reconcile.Result{}, nil +} diff --git a/exp/controllers/azuremanagedmachinepool_controller.go b/exp/controllers/azuremanagedmachinepool_controller.go new file mode 100644 index 00000000000..d36fd705e70 --- /dev/null +++ b/exp/controllers/azuremanagedmachinepool_controller.go @@ -0,0 +1,161 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/tools/record" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" + "sigs.k8s.io/cluster-api-provider-azure/cloud/scope" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" + + "sigs.k8s.io/cluster-api/util" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// AzureManagedMachinePoolReconciler reconciles a AzureManagedMachinePool object +type AzureManagedMachinePoolReconciler struct { + client.Client + Log logr.Logger + Recorder record.EventRecorder +} + +func (r *AzureManagedMachinePoolReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { + return ctrl.NewControllerManagedBy(mgr). + WithOptions(options). + For(&infrav1exp.AzureManagedMachinePool{}). 
+ Complete(r) +} + +// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepools,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepools/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch;patch +// +kubebuilder:rbac:groups=exp.cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch + +func (r *AzureManagedMachinePoolReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) { + ctx := context.TODO() + log := r.Log.WithValues("namespace", req.Namespace, "infraPool", req.Name) + + // Fetch the AzureManagedMachinePool instance + infraPool := &infrav1exp.AzureManagedMachinePool{} + err := r.Get(ctx, req.NamespacedName, infraPool) + if err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + // Fetch the owning MachinePool. + ownerPool, err := getOwnerMachinePool(ctx, r.Client, infraPool.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if ownerPool == nil { + log.Info("MachinePool Controller has not yet set OwnerRef") + return reconcile.Result{}, nil + } + + // Fetch the Cluster. + ownerCluster, err := util.GetOwnerCluster(ctx, r.Client, ownerPool.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if ownerCluster == nil { + log.Info("Cluster Controller has not yet set OwnerRef") + return reconcile.Result{}, nil + } + + log = log.WithValues("ownerCluster", ownerCluster.Name) + + // Fetch the corresponding control plane which has all the interesting data. + controlPlane := &infrav1exp.AzureManagedControlPlane{} + controlPlaneName := client.ObjectKey{ + Namespace: ownerCluster.Spec.ControlPlaneRef.Namespace, + Name: ownerCluster.Spec.ControlPlaneRef.Name, + } + if err := r.Client.Get(ctx, controlPlaneName, controlPlane); err != nil { + return reconcile.Result{}, err + } + + // Create the scope. + mcpScope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: r.Client, + Logger: log, + ControlPlane: controlPlane, + Cluster: ownerCluster, + MachinePool: ownerPool, + InfraMachinePool: infraPool, + PatchTarget: infraPool, + }) + if err != nil { + return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err) + } + + // Handle deleted clusters + if !infraPool.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, mcpScope) + } + + // Handle non-deleted clusters + return r.reconcileNormal(ctx, mcpScope) +} + +func (r *AzureManagedMachinePoolReconciler) reconcileNormal(ctx context.Context, scope *scope.ManagedControlPlaneScope) (reconcile.Result, error) { + scope.Logger.Info("Reconciling AzureManagedMachinePool") + + // If the AzureManagedMachinePool doesn't have our finalizer, add it. 
+ controllerutil.AddFinalizer(scope.InfraMachinePool, infrav1.ClusterFinalizer) + // Register the finalizer immediately to avoid orphaning Azure resources on delete + if err := scope.PatchObject(ctx); err != nil { + return reconcile.Result{}, err + } + + if err := newAzureManagedMachinePoolReconciler(scope).Reconcile(ctx, scope); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error creating AzureManagedMachinePool %s/%s", scope.InfraMachinePool.Namespace, scope.InfraMachinePool.Name) + } + + // No errors, so mark us ready so the Cluster API Cluster Controller can pull it + scope.InfraMachinePool.Status.Ready = true + + return reconcile.Result{}, nil +} + +func (r *AzureManagedMachinePoolReconciler) reconcileDelete(ctx context.Context, scope *scope.ManagedControlPlaneScope) (reconcile.Result, error) { + scope.Logger.Info("Reconciling AzureManagedMachinePool delete") + + if err := newAzureManagedMachinePoolReconciler(scope).Delete(ctx, scope); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureManagedMachinePool %s/%s", scope.InfraMachinePool.Namespace, scope.InfraMachinePool.Name) + } + + // Cluster is deleted so remove the finalizer. + controllerutil.RemoveFinalizer(scope.InfraMachinePool, infrav1.ClusterFinalizer) + + if err := scope.PatchObject(ctx); err != nil { + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil +} diff --git a/exp/controllers/azuremanagedmachinepool_reconciler.go b/exp/controllers/azuremanagedmachinepool_reconciler.go new file mode 100644 index 00000000000..fef47491f3b --- /dev/null +++ b/exp/controllers/azuremanagedmachinepool_reconciler.go @@ -0,0 +1,130 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" + "github.com/pkg/errors" + azure "sigs.k8s.io/cluster-api-provider-azure/cloud" + "sigs.k8s.io/cluster-api-provider-azure/cloud/scope" + "sigs.k8s.io/cluster-api-provider-azure/cloud/services/agentpools" + "sigs.k8s.io/cluster-api-provider-azure/cloud/services/scalesets" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// azureManagedMachinePoolReconciler are list of services required by cluster controller +type azureManagedMachinePoolReconciler struct { + kubeclient client.Client + agentPoolsSvc azure.GetterService + scaleSetsSvc NodeLister +} + +// NodeLister is a service interface for returning generic lists. 
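The azureManagedMachinePoolReconciler above talks to Azure only through small interfaces (azure.GetterService for agent pools and the NodeLister declared just below), which keeps it unit-testable without real credentials. As a usage sketch, a hand-rolled fake could satisfy NodeLister like this; the fakeNodeLister name and canned data are purely illustrative and not part of this change:

package controllers

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
)

// fakeNodeLister is a hypothetical test double that returns canned scale sets
// and instances instead of calling Azure.
type fakeNodeLister struct {
	scaleSets []compute.VirtualMachineScaleSet
	instances []compute.VirtualMachineScaleSetVM
}

func (f *fakeNodeLister) List(ctx context.Context, resourceGroup string) ([]compute.VirtualMachineScaleSet, error) {
	return f.scaleSets, nil
}

func (f *fakeNodeLister) ListInstances(ctx context.Context, resourceGroup, name string) ([]compute.VirtualMachineScaleSetVM, error) {
	return f.instances, nil
}

In a test, the reconciler can then be constructed directly with scaleSetsSvc set to such a fake, alongside a fake agent pools service.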
+type NodeLister interface { + ListInstances(context.Context, string, string) ([]compute.VirtualMachineScaleSetVM, error) + List(context.Context, string) ([]compute.VirtualMachineScaleSet, error) +} + +// newAzureManagedMachinePoolReconciler populates all the services based on input scope +func newAzureManagedMachinePoolReconciler(scope *scope.ManagedControlPlaneScope) *azureManagedMachinePoolReconciler { + return &azureManagedMachinePoolReconciler{ + kubeclient: scope.Client, + agentPoolsSvc: agentpools.NewService(scope.AzureClients.Authorizer, scope.AzureClients.SubscriptionID), + scaleSetsSvc: scalesets.NewService(scope.AzureClients.Authorizer, scope.AzureClients.SubscriptionID), + } +} + +// Reconcile reconciles all the services in pre determined order +func (r *azureManagedMachinePoolReconciler) Reconcile(ctx context.Context, scope *scope.ManagedControlPlaneScope) error { + scope.Logger.Info("reconciling machine pool") + agentPoolSpec := &agentpools.Spec{ + Name: scope.InfraMachinePool.Name, + ResourceGroup: scope.ControlPlane.Spec.ResourceGroup, + Cluster: scope.ControlPlane.Name, + SKU: scope.InfraMachinePool.Spec.SKU, + Replicas: 1, + Version: scope.MachinePool.Spec.Template.Spec.Version, + } + + if scope.InfraMachinePool.Spec.OSDiskSizeGB != nil { + agentPoolSpec.OSDiskSizeGB = *scope.InfraMachinePool.Spec.OSDiskSizeGB + } + + if scope.MachinePool.Spec.Replicas != nil { + agentPoolSpec.Replicas = *scope.MachinePool.Spec.Replicas + } + + if err := r.agentPoolsSvc.Reconcile(ctx, agentPoolSpec); err != nil { + return errors.Wrapf(err, "failed to reconcile machine pool %s", scope.InfraMachinePool.Name) + } + + nodeResourceGroup := fmt.Sprintf("MC_%s_%s_%s", scope.ControlPlane.Spec.ResourceGroup, scope.ControlPlane.Name, scope.ControlPlane.Spec.Location) + vmss, err := r.scaleSetsSvc.List(ctx, nodeResourceGroup) + if err != nil { + return errors.Wrapf(err, "failed to list vmss in resource group %s", nodeResourceGroup) + } + + var match *compute.VirtualMachineScaleSet + for _, ss := range vmss { + ss := ss + if ss.Tags["poolName"] != nil && *ss.Tags["poolName"] == scope.InfraMachinePool.Name { + match = &ss + break + } + } + + if match == nil { + return errors.New("failed to find vm scale set matching pool") + } + + instances, err := r.scaleSetsSvc.ListInstances(ctx, nodeResourceGroup, *match.Name) + if err != nil { + return errors.Wrapf(err, "failed to reconcile machine pool %s", scope.InfraMachinePool.Name) + } + + var providerIDs = make([]string, len(instances)) + for _, vm := range instances { + vm := vm + providerIDs = append(providerIDs, fmt.Sprintf("azure://%s", *vm.ID)) + } + + scope.InfraMachinePool.Spec.ProviderIDList = providerIDs + scope.InfraMachinePool.Status.Replicas = int32(len(providerIDs)) + scope.InfraMachinePool.Status.Ready = true + + scope.Logger.Info("reconciled machine pool successfully") + return nil +} + +// Delete reconciles all the services in pre determined order +func (r *azureManagedMachinePoolReconciler) Delete(ctx context.Context, scope *scope.ManagedControlPlaneScope) error { + agentPoolSpec := &agentpools.Spec{ + Name: scope.InfraMachinePool.Name, + ResourceGroup: scope.ControlPlane.Spec.ResourceGroup, + Cluster: scope.ControlPlane.Name, + SKU: scope.InfraMachinePool.Spec.SKU, + } + + if err := r.agentPoolsSvc.Delete(ctx, agentPoolSpec); err != nil { + return errors.Wrapf(err, "failed to delete machine pool %s", scope.InfraMachinePool.Name) + } + + return nil +} diff --git a/exp/controllers/azuremangedcontrolplane_controller.go 
b/exp/controllers/azuremangedcontrolplane_controller.go new file mode 100644 index 00000000000..a6da60bcba9 --- /dev/null +++ b/exp/controllers/azuremangedcontrolplane_controller.go @@ -0,0 +1,169 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/tools/record" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" + + "sigs.k8s.io/cluster-api-provider-azure/cloud/scope" + "sigs.k8s.io/cluster-api/util" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// AzureManagedControlPlaneReconciler reconciles a AzureManagedControlPlane object +type AzureManagedControlPlaneReconciler struct { + client.Client + Log logr.Logger + Recorder record.EventRecorder +} + +func (r *AzureManagedControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { + return ctrl.NewControllerManagedBy(mgr). + WithOptions(options). + For(&infrav1exp.AzureManagedControlPlane{}). + Complete(r) +} + +// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedcontrolplanes,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedcontrolplanes/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch + +func (r *AzureManagedControlPlaneReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) { + ctx := context.TODO() + log := r.Log.WithValues("namespace", req.Namespace, "azureManagedControlPlanes", req.Name) + + // Fetch the AzureManagedControlPlane instance + azureControlPlane := &infrav1exp.AzureManagedControlPlane{} + err := r.Get(ctx, req.NamespacedName, azureControlPlane) + if err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + // Fetch the Cluster. 
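Stepping back to the machine pool reconciler above: it finds the scale set that AKS manages for a pool by listing the auto-generated node resource group and matching on the poolName tag, then builds provider IDs from the scale set VM IDs. The following sketch only illustrates the naming formats involved; the subscription, cluster, and VMSS names are made-up placeholders:

package controllers

import "fmt"

// exampleProviderID illustrates, with fabricated values, the node resource
// group and provider ID formats the machine pool reconciler relies on.
func exampleProviderID() string {
	resourceGroup, clusterName, location := "capz-rg", "capz-aks", "eastus"

	// AKS places agent nodes in an auto-generated "MC_<rg>_<cluster>_<location>" resource group.
	nodeResourceGroup := fmt.Sprintf("MC_%s_%s_%s", resourceGroup, clusterName, location)

	// Provider IDs are the scale set VM ARM IDs prefixed with the "azure://" scheme.
	vmID := "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/" + nodeResourceGroup +
		"/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool0-12345678-vmss/virtualMachines/0"
	return fmt.Sprintf("azure://%s", vmID)
}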
+ cluster, err := util.GetOwnerCluster(ctx, r.Client, azureControlPlane.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if cluster == nil { + log.Info("Cluster Controller has not yet set OwnerRef") + return reconcile.Result{}, nil + } + + log = log.WithValues("cluster", cluster.Name) + + // fetch default pool + defaultPoolKey := client.ObjectKey{ + Name: azureControlPlane.Spec.DefaultPoolRef.Name, + Namespace: azureControlPlane.Namespace, + } + defaultPool := &infrav1exp.AzureManagedMachinePool{} + if err := r.Client.Get(ctx, defaultPoolKey, defaultPool); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "failed to fetch default pool reference") + } + + log = log.WithValues("azureManagedMachinePool", defaultPoolKey.Name) + + // fetch owner of default pool + // TODO(ace): create a helper in util for this + // Fetch the owning MachinePool. + ownerPool, err := getOwnerMachinePool(ctx, r.Client, defaultPool.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if ownerPool == nil { + log.Info("failed to fetch owner ref for default pool") + return reconcile.Result{}, nil + } + + log = log.WithValues("machinePool", ownerPool.Name) + + // Create the scope. + mcpScope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: r.Client, + Logger: log, + Cluster: cluster, + ControlPlane: azureControlPlane, + MachinePool: ownerPool, + InfraMachinePool: defaultPool, + PatchTarget: azureControlPlane, + }) + if err != nil { + return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err) + } + + // Always patch when exiting so we can persist changes to finalizers and status + defer func() { + if err := mcpScope.PatchObject(ctx); err != nil && reterr == nil { + reterr = err + } + }() + + // Handle deleted clusters + if !azureControlPlane.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, mcpScope) + } + + // Handle non-deleted clusters + return r.reconcileNormal(ctx, mcpScope) +} + +func (r *AzureManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, scope *scope.ManagedControlPlaneScope) (reconcile.Result, error) { + scope.Logger.Info("Reconciling AzureManagedControlPlane") + + // If the AzureManagedControlPlane doesn't have our finalizer, add it. + controllerutil.AddFinalizer(scope.ControlPlane, infrav1.ClusterFinalizer) + // Register the finalizer immediately to avoid orphaning Azure resources on delete + if err := scope.PatchObject(ctx); err != nil { + return reconcile.Result{}, err + } + + if err := newAzureManagedControlPlaneReconciler(scope).Reconcile(ctx, scope); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error creating AzureManagedControlPlane %s/%s", scope.ControlPlane.Namespace, scope.ControlPlane.Name) + } + + // No errors, so mark us ready so the Cluster API Cluster Controller can pull it + scope.ControlPlane.Status.Ready = true + + return reconcile.Result{}, nil +} + +func (r *AzureManagedControlPlaneReconciler) reconcileDelete(ctx context.Context, scope *scope.ManagedControlPlaneScope) (reconcile.Result, error) { + scope.Logger.Info("Reconciling AzureManagedControlPlane delete") + + if err := newAzureManagedControlPlaneReconciler(scope).Delete(ctx, scope); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureManagedControlPlane %s/%s", scope.ControlPlane.Namespace, scope.ControlPlane.Name) + } + + // Cluster is deleted so remove the finalizer. 
+ controllerutil.RemoveFinalizer(scope.ControlPlane, infrav1.ClusterFinalizer) + + return reconcile.Result{}, nil +} diff --git a/exp/controllers/azuremangedcontrolplane_reconciler.go b/exp/controllers/azuremangedcontrolplane_reconciler.go new file mode 100644 index 00000000000..111498aafdc --- /dev/null +++ b/exp/controllers/azuremangedcontrolplane_reconciler.go @@ -0,0 +1,184 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + azure "sigs.k8s.io/cluster-api-provider-azure/cloud" + "sigs.k8s.io/cluster-api-provider-azure/cloud/scope" + "sigs.k8s.io/cluster-api-provider-azure/cloud/services/managedclusters" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/util/secret" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +// azureManagedControlPlaneReconciler are list of services required by cluster controller +type azureManagedControlPlaneReconciler struct { + kubeclient client.Client + managedClustersSvc azure.CredentialGetter +} + +// newAzureManagedControlPlaneReconciler populates all the services based on input scope +func newAzureManagedControlPlaneReconciler(scope *scope.ManagedControlPlaneScope) *azureManagedControlPlaneReconciler { + return &azureManagedControlPlaneReconciler{ + kubeclient: scope.Client, + managedClustersSvc: managedclusters.NewService(scope.AzureClients.Authorizer, scope.AzureClients.SubscriptionID), + } +} + +// Reconcile reconciles all the services in pre determined order +func (r *azureManagedControlPlaneReconciler) Reconcile(ctx context.Context, scope *scope.ManagedControlPlaneScope) error { + scope.Logger.Info("reconciling cluster") + managedClusterSpec := &managedclusters.Spec{ + Name: scope.ControlPlane.Name, + ResourceGroup: scope.ControlPlane.Spec.ResourceGroup, + Location: scope.ControlPlane.Spec.Location, + Tags: scope.ControlPlane.Spec.AdditionalTags, + Version: scope.ControlPlane.Spec.Version, + LoadBalancerSKU: scope.ControlPlane.Spec.LoadBalancerSKU, + NetworkPlugin: scope.ControlPlane.Spec.NetworkPlugin, + NetworkPolicy: scope.ControlPlane.Spec.NetworkPolicy, + SSHPublicKey: scope.ControlPlane.Spec.SSHPublicKey, + } + + if net := scope.Cluster.Spec.ClusterNetwork; net != nil { + if net.Services != nil { + // A user may provide zero or one CIDR blocks. If they provide an empty array, + // we ignore it and use the default. AKS doesn't support > 1 Service/Pod CIDR. + if len(net.Services.CIDRBlocks) > 1 { + return errors.New("managed control planes only allow one service cidr") + } + if len(net.Services.CIDRBlocks) == 1 { + managedClusterSpec.ServiceCIDR = net.Services.CIDRBlocks[0] + } + } + if net.Pods != nil { + // A user may provide zero or one CIDR blocks. 
If they provide an empty array, + // we ignore it and use the default. AKS doesn't support > 1 Service/Pod CIDR. + if len(net.Pods.CIDRBlocks) > 1 { + return errors.New("managed control planes only allow one service cidr") + } + if len(net.Pods.CIDRBlocks) == 1 { + managedClusterSpec.PodCIDR = net.Pods.CIDRBlocks[0] + } + } + } + + _, err := r.managedClustersSvc.Get(ctx, managedClusterSpec) + if err != nil && !azure.ResourceNotFound(err) { + return errors.Wrapf(err, "failed to reconcile managed cluster %s", scope.ControlPlane.Name) + } + + if azure.ResourceNotFound(err) { + // We are creating this cluster for the first time. + // Configure the default pool, rest will be handled by machinepool controller + defaultPoolSpec := managedclusters.PoolSpec{ + Name: scope.InfraMachinePool.Name, + SKU: scope.InfraMachinePool.Spec.SKU, + Replicas: 1, + OSDiskSizeGB: 0, + } + + // Set optional values + if scope.InfraMachinePool.Spec.OSDiskSizeGB != nil { + defaultPoolSpec.OSDiskSizeGB = *scope.InfraMachinePool.Spec.OSDiskSizeGB + } + if scope.MachinePool.Spec.Replicas != nil { + defaultPoolSpec.Replicas = *scope.MachinePool.Spec.Replicas + } + + // Add to cluster spec + managedClusterSpec.AgentPools = []managedclusters.PoolSpec{defaultPoolSpec} + } + + // Send to Azure for updates + if err := r.managedClustersSvc.Reconcile(ctx, managedClusterSpec); err != nil { + return errors.Wrapf(err, "failed to reconcile managed cluster %s", scope.ControlPlane.Name) + } + + // Fetch newly updated cluster + managedClusterResult, err := r.managedClustersSvc.Get(ctx, managedClusterSpec) + if err != nil { + return err + } + + managedCluster, ok := managedClusterResult.(containerservice.ManagedCluster) + if !ok { + return fmt.Errorf("expected containerservice ManagedCluster object") + } + + old := scope.ControlPlane.DeepCopyObject() + + scope.ControlPlane.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ + Host: *managedCluster.ManagedClusterProperties.Fqdn, + Port: 443, + } + + if err := r.kubeclient.Patch(ctx, scope.ControlPlane, client.MergeFrom(old)); err != nil { + return errors.Wrapf(err, "failed to set control plane endpoint") + } + + // Always fetch credentials in case of rotation + data, err := r.managedClustersSvc.GetCredentials(ctx, managedClusterSpec) + if err != nil { + return errors.Wrapf(err, "failed to get credentials for managed cluster") + } + + // Construct and store secret + kubeconfig := makeKubeconfig(scope.Cluster) + _, err = controllerutil.CreateOrUpdate(ctx, r.kubeclient, kubeconfig, func() error { + kubeconfig.Data = map[string][]byte{ + secret.KubeconfigDataName: data, + } + return nil + }) + + return err +} + +// Delete reconciles all the services in pre determined order +func (r *azureManagedControlPlaneReconciler) Delete(ctx context.Context, scope *scope.ManagedControlPlaneScope) error { + if err := r.managedClustersSvc.Delete(ctx, nil); err != nil { + return errors.Wrapf(err, "failed to delete managed cluster %s", scope.ControlPlane.Name) + } + + return nil +} + +func makeKubeconfig(cluster *clusterv1.Cluster) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secret.Name(cluster.Name, secret.Kubeconfig), + Namespace: cluster.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: cluster.Name, + UID: cluster.UID, + }, + }, + }, + } +} diff --git a/main.go b/main.go index 3a6fb0aa5c6..d40af16f82c 100644 --- a/main.go +++ b/main.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes 
Authors. +Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -38,10 +38,11 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/controllers" infrav1alpha3exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" infrav1controllersexp "sigs.k8s.io/cluster-api-provider-azure/exp/controllers" + capifeature "sigs.k8s.io/cluster-api/feature" + "sigs.k8s.io/cluster-api-provider-azure/feature" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3" - "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -205,10 +206,9 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "AzureCluster") os.Exit(1) } - // just use CAPI MachinePool feature flag rather than create a new one setupLog.V(1).Info(fmt.Sprintf("%+v\n", feature.Gates)) - if feature.Gates.Enabled(feature.MachinePool) { + if feature.Gates.Enabled(capifeature.MachinePool) { if err = (&infrav1controllersexp.AzureMachinePoolReconciler{ Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("AzureMachinePool"), @@ -217,7 +217,34 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "AzureMachinePool") os.Exit(1) } + if feature.Gates.Enabled(feature.AKS) { + if err = (&infrav1controllersexp.AzureManagedMachinePoolReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("AzureManagedMachinePool"), + Recorder: mgr.GetEventRecorderFor("azuremachine-reconciler"), + }).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: azureMachineConcurrency}); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AzureManagedMachinePool") + os.Exit(1) + } + if err = (&infrav1controllersexp.AzureManagedClusterReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("AzureManagedCluster"), + Recorder: mgr.GetEventRecorderFor("azuremanagedcluster-reconciler"), + }).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AzureManagedCluster") + os.Exit(1) + } + if err = (&infrav1controllersexp.AzureManagedControlPlaneReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("AzureManagedControlPlane"), + Recorder: mgr.GetEventRecorderFor("azuremanagedcontrolplane-reconciler"), + }).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AzureManagedControlPlane") + os.Exit(1) + } + } } + // } } else { if err = (&infrav1alpha3.AzureCluster{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "AzureCluster") @@ -232,7 +259,7 @@ func main() { os.Exit(1) } // just use CAPI MachinePool feature flag rather than create a new one - if feature.Gates.Enabled(feature.MachinePool) { + if feature.Gates.Enabled(capifeature.MachinePool) { if err = (&infrav1alpha3exp.AzureMachinePool{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachinePool") os.Exit(1)
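A usage note on the control plane reconciler above: the kubeconfig it stores lands in a Secret named via secret.Name(cluster.Name, secret.Kubeconfig), i.e. "<cluster-name>-kubeconfig", under the standard secret.KubeconfigDataName key, so it can be read back with an ordinary client. The helper below is an illustrative sketch, not part of this diff:

package controllers

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/cluster-api/util/secret"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getAKSKubeconfig reads back the kubeconfig Secret written by the managed
// control plane reconciler for the given workload cluster.
func getAKSKubeconfig(ctx context.Context, c client.Client, clusterName, namespace string) ([]byte, error) {
	s := &corev1.Secret{}
	key := client.ObjectKey{
		Name:      secret.Name(clusterName, secret.Kubeconfig),
		Namespace: namespace,
	}
	if err := c.Get(ctx, key, s); err != nil {
		return nil, err
	}
	return s.Data[secret.KubeconfigDataName], nil
}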
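Finally, main.go now distinguishes the upstream CAPI gate (capifeature.MachinePool) from a provider-local one (feature.AKS). The new sigs.k8s.io/cluster-api-provider-azure/feature package is not included in this diff; for main.go to compile, it presumably exposes something along these lines. This is a sketch under that assumption, not the actual file, and the reuse of capifeature.MutableGates is a guess:

package feature

import (
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/component-base/featuregate"
	capifeature "sigs.k8s.io/cluster-api/feature"
)

const (
	// AKS enables the experimental AzureManagedCluster/ControlPlane/MachinePool controllers.
	AKS featuregate.Feature = "AKS"
)

var (
	// MutableGates builds on the upstream CAPI gate so that checks such as
	// Gates.Enabled(capifeature.MachinePool) keep working alongside AKS.
	MutableGates featuregate.MutableFeatureGate = capifeature.MutableGates

	// Gates is the read-only view consumed by main.go.
	Gates featuregate.FeatureGate = MutableGates
)

func init() {
	runtime.Must(MutableGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		AKS: {Default: false, PreRelease: featuregate.Alpha},
	}))
}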