✨ implement controllers
alexeldeib committed May 7, 2020
1 parent fde4598 commit 451a0ff
Showing 7 changed files with 805 additions and 18 deletions.
31 changes: 18 additions & 13 deletions exp/controllers/azuremachinepool_controller.go
@@ -45,12 +45,12 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"

"sigs.k8s.io/cluster-api-provider-azure/cloud/services/scalesets"
capzcntr "sigs.k8s.io/cluster-api-provider-azure/controllers"

infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3"
azure "sigs.k8s.io/cluster-api-provider-azure/cloud"
"sigs.k8s.io/cluster-api-provider-azure/cloud/scope"
"sigs.k8s.io/cluster-api-provider-azure/cloud/services/scalesets"
infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3"
)

@@ -372,7 +372,7 @@ func (r *AzureMachinePoolReconciler) reconcileTags(machinePoolScope *scope.Machi
vmssSpec := &scalesets.Spec{
Name: machinePoolScope.Name(),
}
svc := scalesets.NewService(clusterScope, machinePoolScope)
svc := scalesets.NewService(machinePoolScope.AzureClients.Authorizer, machinePoolScope.AzureClients.SubscriptionID)
vm, err := svc.Client.Get(clusterScope.Context, clusterScope.ResourceGroup(), machinePoolScope.Name())
if err != nil {
return errors.Wrapf(err, "failed to query AzureMachine VMSS")
@@ -463,7 +463,7 @@ func newAzureMachinePoolService(machinePoolScope *scope.MachinePoolScope, cluste
return &azureMachinePoolService{
machinePoolScope: machinePoolScope,
clusterScope: clusterScope,
virtualMachinesScaleSetSvc: scalesets.NewService(clusterScope, machinePoolScope),
virtualMachinesScaleSetSvc: scalesets.NewService(machinePoolScope.AzureClients.Authorizer, machinePoolScope.AzureClients.SubscriptionID),
}
}

@@ -490,13 +490,19 @@ func (s *azureMachinePoolService) CreateOrUpdate() (*infrav1exp.VMSS, error) {
}

vmssSpec := &scalesets.Spec{
Name: s.machinePoolScope.Name(),
Sku: ampSpec.Template.VMSize,
Capacity: replicas,
SSHKeyData: string(decoded),
Image: image,
OSDisk: ampSpec.Template.OSDisk,
CustomData: bootstrapData,
Name: s.machinePoolScope.Name(),
ResourceGroup: s.clusterScope.ResourceGroup(),
Location: s.clusterScope.Location(),
ClusterName: s.clusterScope.Name(),
MachinePoolName: s.machinePoolScope.Name(),
Sku: ampSpec.Template.VMSize,
Capacity: replicas,
SSHKeyData: string(decoded),
Image: image,
OSDisk: ampSpec.Template.OSDisk,
CustomData: bootstrapData,
AdditionalTags: s.machinePoolScope.AdditionalTags(),
SubnetID: s.clusterScope.AzureCluster.Spec.NetworkSpec.Subnets[0].ID,
}

err = s.virtualMachinesScaleSetSvc.Reconcile(context.TODO(), vmssSpec)
@@ -567,15 +573,14 @@ func (s *azureMachinePoolService) Get() (*infrav1exp.VMSS, error) {
func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*capiv1exp.MachinePool, error) {
for _, ref := range obj.OwnerReferences {
if ref.Kind == "MachinePool" && ref.APIVersion == capiv1exp.GroupVersion.String() {

return getMachineByName(ctx, c, obj.Namespace, ref.Name)
return getMachinePoolByName(ctx, c, obj.Namespace, ref.Name)
}
}
return nil, nil
}

// getMachinePoolByName finds and returns a MachinePool object using the specified params.
func getMachineByName(ctx context.Context, c client.Client, namespace, name string) (*capiv1exp.MachinePool, error) {
func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*capiv1exp.MachinePool, error) {
m := &capiv1exp.MachinePool{}
key := client.ObjectKey{Name: name, Namespace: namespace}
if err := c.Get(ctx, key, m); err != nil {
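For orientation, the change above can be read as decoupling the scale-set service from the controller scopes: scalesets.NewService now takes an authorizer and subscription ID directly, and the expanded Spec carries the values (resource group, location, cluster name, machine pool name, tags, subnet ID) that the service previously read off the scopes. Below is a minimal, self-contained sketch of that shape using simplified stand-in types — not the repository's actual definitions.

// Illustrative sketch only: simplified stand-ins for the types touched in this diff.
package main

import "fmt"

// Authorizer stands in for autorest.Authorizer; any credential type would do here.
type Authorizer interface{}

// Spec mirrors, in reduced form, the expanded scalesets.Spec shown above.
type Spec struct {
	Name, ResourceGroup, Location, ClusterName, MachinePoolName string
	Sku                                                          string
	Capacity                                                     int64
	SubnetID                                                     string
}

// Service holds only the credentials it needs, not the controller scopes.
type Service struct {
	Authorizer     Authorizer
	SubscriptionID string
}

// NewService mirrors the new constructor signature used by the controller above.
func NewService(authorizer Authorizer, subscriptionID string) *Service {
	return &Service{Authorizer: authorizer, SubscriptionID: subscriptionID}
}

// Reconcile receives everything that used to come from the scopes via the spec.
func (s *Service) Reconcile(spec *Spec) error {
	fmt.Printf("reconciling VMSS %q in subscription %s, resource group %s\n",
		spec.Name, s.SubscriptionID, spec.ResourceGroup)
	return nil
}

func main() {
	svc := NewService(nil, "00000000-0000-0000-0000-000000000000")
	_ = svc.Reconcile(&Spec{Name: "pool0", ResourceGroup: "rg", Location: "eastus", ClusterName: "capz"})
}

One benefit of this shape is that the service can be constructed and exercised in unit tests without building full cluster and machine-pool scopes.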
111 changes: 111 additions & 0 deletions exp/controllers/azuremanagedcluster_controller.go
@@ -0,0 +1,111 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
"context"

"github.com/go-logr/logr"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3"

"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// AzureManagedClusterReconciler reconciles an AzureManagedCluster object
type AzureManagedClusterReconciler struct {
client.Client
Log logr.Logger
Recorder record.EventRecorder
}

func (r *AzureManagedClusterReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
return ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1exp.AzureManagedCluster{}).
Complete(r)
}

// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedclusters,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedclusters/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete

func (r *AzureManagedClusterReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
ctx := context.TODO()
log := r.Log.WithValues("namespace", req.Namespace, "aksCluster", req.Name)

// Fetch the AzureManagedCluster instance
aksCluster := &infrav1exp.AzureManagedCluster{}
err := r.Get(ctx, req.NamespacedName, aksCluster)
if err != nil {
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}

// Fetch the Cluster.
cluster, err := util.GetOwnerCluster(ctx, r.Client, aksCluster.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if cluster == nil {
log.Info("Cluster Controller has not yet set OwnerRef")
return reconcile.Result{}, nil
}

controlPlane := &infrav1exp.AzureManagedControlPlane{}
controlPlaneRef := types.NamespacedName{
Name: cluster.Spec.ControlPlaneRef.Name,
Namespace: cluster.Namespace,
}

log = log.WithValues("cluster", cluster.Name)

if err := r.Get(ctx, controlPlaneRef, controlPlane); err != nil {
return reconcile.Result{}, errors.Wrap(err, "failed to get control plane ref")
}

log = log.WithValues("controlPlane", controlPlaneRef.Name)

patchhelper, err := patch.NewHelper(aksCluster, r.Client)
if err != nil {
return reconcile.Result{}, errors.Wrap(err, "failed to init patch helper")
}

// Match whatever the control plane says. We should also enqueue
// requests from control plane to infra cluster to keep this accurate
aksCluster.Status.Ready = controlPlane.Status.Ready
aksCluster.Spec.ControlPlaneEndpoint = controlPlane.Spec.ControlPlaneEndpoint

if err := patchhelper.Patch(ctx, aksCluster); err != nil {
return reconcile.Result{}, err
}

log.Info("Successfully reconciled")

return reconcile.Result{}, nil
}
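As a usage sketch, a reconciler shaped like the new AzureManagedClusterReconciler would typically be registered with a controller-runtime manager along these lines. This is not the commit's actual main.go wiring (those changes are not shown in this excerpt); the scheme registrations and option values are illustrative assumptions, and it presumes a controller-runtime version of this era in which mgr.Start accepts the stop channel returned by ctrl.SetupSignalHandler().

// Illustrative sketch only: the real CAPZ manager setup registers more schemes,
// flags, and controllers than shown here.
package main

import (
	"os"

	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3"
	expcontrollers "sigs.k8s.io/cluster-api-provider-azure/exp/controllers"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
)

var scheme = runtime.NewScheme()

func init() {
	// Register built-in types plus the CAPI and CAPZ experimental APIs the reconciler reads.
	_ = clientgoscheme.AddToScheme(scheme)
	_ = clusterv1.AddToScheme(scheme)
	_ = infrav1exp.AddToScheme(scheme)
}

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		os.Exit(1)
	}

	if err := (&expcontrollers.AzureManagedClusterReconciler{
		Client:   mgr.GetClient(),
		Log:      ctrl.Log.WithName("controllers").WithName("AzureManagedCluster"),
		Recorder: mgr.GetEventRecorderFor("azuremanagedcluster-controller"),
	}).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
		os.Exit(1)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}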
161 changes: 161 additions & 0 deletions exp/controllers/azuremanagedmachinepool_controller.go
@@ -0,0 +1,161 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
"context"

"github.com/go-logr/logr"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/record"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3"
"sigs.k8s.io/cluster-api-provider-azure/cloud/scope"
infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3"

"sigs.k8s.io/cluster-api/util"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// AzureManagedMachinePoolReconciler reconciles an AzureManagedMachinePool object
type AzureManagedMachinePoolReconciler struct {
client.Client
Log logr.Logger
Recorder record.EventRecorder
}

func (r *AzureManagedMachinePoolReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
return ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1exp.AzureManagedMachinePool{}).
Complete(r)
}

// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepools,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepools/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch;patch
// +kubebuilder:rbac:groups=exp.cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch

func (r *AzureManagedMachinePoolReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
ctx := context.TODO()
log := r.Log.WithValues("namespace", req.Namespace, "infraPool", req.Name)

// Fetch the AzureManagedMachinePool instance
infraPool := &infrav1exp.AzureManagedMachinePool{}
err := r.Get(ctx, req.NamespacedName, infraPool)
if err != nil {
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}

// Fetch the owning MachinePool.
ownerPool, err := getOwnerMachinePool(ctx, r.Client, infraPool.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if ownerPool == nil {
log.Info("MachinePool Controller has not yet set OwnerRef")
return reconcile.Result{}, nil
}

// Fetch the Cluster.
ownerCluster, err := util.GetOwnerCluster(ctx, r.Client, ownerPool.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if ownerCluster == nil {
log.Info("Cluster Controller has not yet set OwnerRef")
return reconcile.Result{}, nil
}

log = log.WithValues("ownerCluster", ownerCluster.Name)

// Fetch the corresponding control plane which has all the interesting data.
controlPlane := &infrav1exp.AzureManagedControlPlane{}
controlPlaneName := client.ObjectKey{
Namespace: ownerCluster.Spec.ControlPlaneRef.Namespace,
Name: ownerCluster.Spec.ControlPlaneRef.Name,
}
if err := r.Client.Get(ctx, controlPlaneName, controlPlane); err != nil {
return reconcile.Result{}, err
}

// Create the scope.
mcpScope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{
Client: r.Client,
Logger: log,
ControlPlane: controlPlane,
Cluster: ownerCluster,
MachinePool: ownerPool,
InfraMachinePool: infraPool,
PatchTarget: infraPool,
})
if err != nil {
return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err)
}

// Handle deleted clusters
if !infraPool.DeletionTimestamp.IsZero() {
return r.reconcileDelete(ctx, mcpScope)
}

// Handle non-deleted clusters
return r.reconcileNormal(ctx, mcpScope)
}

func (r *AzureManagedMachinePoolReconciler) reconcileNormal(ctx context.Context, scope *scope.ManagedControlPlaneScope) (reconcile.Result, error) {
scope.Logger.Info("Reconciling AzureManagedMachinePool")

// If the AzureManagedMachinePool doesn't have our finalizer, add it.
controllerutil.AddFinalizer(scope.InfraMachinePool, infrav1.ClusterFinalizer)
// Register the finalizer immediately to avoid orphaning Azure resources on delete
if err := scope.PatchObject(ctx); err != nil {
return reconcile.Result{}, err
}

if err := newAzureManagedMachinePoolReconciler(scope).Reconcile(ctx, scope); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "error creating AzureManagedMachinePool %s/%s", scope.InfraMachinePool.Namespace, scope.InfraMachinePool.Name)
}

// No errors, so mark us ready so the Cluster API Cluster Controller can pull it
scope.InfraMachinePool.Status.Ready = true

return reconcile.Result{}, nil
}

func (r *AzureManagedMachinePoolReconciler) reconcileDelete(ctx context.Context, scope *scope.ManagedControlPlaneScope) (reconcile.Result, error) {
scope.Logger.Info("Reconciling AzureManagedMachinePool delete")

if err := newAzureManagedMachinePoolReconciler(scope).Delete(ctx, scope); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureManagedMachinePool %s/%s", scope.InfraMachinePool.Namespace, scope.InfraMachinePool.Name)
}

// Cluster is deleted so remove the finalizer.
controllerutil.RemoveFinalizer(scope.InfraMachinePool, infrav1.ClusterFinalizer)

if err := scope.PatchObject(ctx); err != nil {
return reconcile.Result{}, err
}

return reconcile.Result{}, nil
}
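The service-layer helper returned by newAzureManagedMachinePoolReconciler is defined in one of the changed files not shown in this excerpt. Judging only from the call sites above, it exposes Reconcile and Delete methods that take the managed control plane scope; the sketch below is a hypothetical stand-in for that delegation pattern, with simplified types that are not the repository's actual code.

// Hypothetical stand-in only: types and behavior inferred from the call sites above.
package main

import (
	"context"
	"fmt"
)

// ManagedControlPlaneScope is a simplified stand-in for scope.ManagedControlPlaneScope.
type ManagedControlPlaneScope struct {
	InfraMachinePoolName string
}

// azureManagedMachinePoolReconciler stands in for the unexported service-layer type.
type azureManagedMachinePoolReconciler struct{}

func newAzureManagedMachinePoolReconciler(_ *ManagedControlPlaneScope) *azureManagedMachinePoolReconciler {
	return &azureManagedMachinePoolReconciler{}
}

// Reconcile would presumably create or update the AKS agent pool backing the machine pool.
func (r *azureManagedMachinePoolReconciler) Reconcile(ctx context.Context, s *ManagedControlPlaneScope) error {
	fmt.Println("reconciling agent pool", s.InfraMachinePoolName)
	return nil
}

// Delete would presumably tear the agent pool down before the controller removes its finalizer.
func (r *azureManagedMachinePoolReconciler) Delete(ctx context.Context, s *ManagedControlPlaneScope) error {
	fmt.Println("deleting agent pool", s.InfraMachinePoolName)
	return nil
}

func main() {
	s := &ManagedControlPlaneScope{InfraMachinePoolName: "pool0"}
	_ = newAzureManagedMachinePoolReconciler(s).Reconcile(context.TODO(), s)
}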
The remaining 4 changed files are not shown here.
