-
Notifications
You must be signed in to change notification settings - Fork 431
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
1 parent
fde4598
commit 451a0ff
Showing
7 changed files
with
805 additions
and
18 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,111 @@ | ||
/* | ||
Copyright 2020 The Kubernetes Authors. | ||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
http://www.apache.org/licenses/LICENSE-2.0 | ||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. | ||
*/ | ||
|
||
package controllers | ||
|
||
import ( | ||
"context" | ||
|
||
"github.com/go-logr/logr" | ||
"github.com/pkg/errors" | ||
apierrors "k8s.io/apimachinery/pkg/api/errors" | ||
"k8s.io/apimachinery/pkg/types" | ||
"k8s.io/client-go/tools/record" | ||
infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" | ||
|
||
"sigs.k8s.io/cluster-api/util" | ||
"sigs.k8s.io/cluster-api/util/patch" | ||
ctrl "sigs.k8s.io/controller-runtime" | ||
"sigs.k8s.io/controller-runtime/pkg/client" | ||
"sigs.k8s.io/controller-runtime/pkg/controller" | ||
"sigs.k8s.io/controller-runtime/pkg/reconcile" | ||
) | ||
|
||
// AzureManagedClusterReconciler reconciles an AzureManagedCluster object.
type AzureManagedClusterReconciler struct {
	client.Client
	// Log is the base logger; Reconcile derives request-scoped loggers from it.
	Log logr.Logger
	// Recorder is an event recorder for emitting Kubernetes events.
	Recorder record.EventRecorder
}
|
||
func (r *AzureManagedClusterReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { | ||
return ctrl.NewControllerManagedBy(mgr). | ||
WithOptions(options). | ||
For(&infrav1exp.AzureManagedCluster{}). | ||
Complete(r) | ||
} | ||
|
||
// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedclusters,verbs=get;list;watch;create;update;patch;delete | ||
// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedclusters/status,verbs=get;update;patch | ||
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch | ||
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete | ||
|
||
func (r *AzureManagedClusterReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) { | ||
ctx := context.TODO() | ||
log := r.Log.WithValues("namespace", req.Namespace, "aksCluster", req.Name) | ||
|
||
// Fetch the AzureManagedCluster instance | ||
aksCluster := &infrav1exp.AzureManagedCluster{} | ||
err := r.Get(ctx, req.NamespacedName, aksCluster) | ||
if err != nil { | ||
if apierrors.IsNotFound(err) { | ||
return reconcile.Result{}, nil | ||
} | ||
return reconcile.Result{}, err | ||
} | ||
|
||
// Fetch the Cluster. | ||
cluster, err := util.GetOwnerCluster(ctx, r.Client, aksCluster.ObjectMeta) | ||
if err != nil { | ||
return reconcile.Result{}, err | ||
} | ||
if cluster == nil { | ||
log.Info("Cluster Controller has not yet set OwnerRef") | ||
return reconcile.Result{}, nil | ||
} | ||
|
||
controlPlane := &infrav1exp.AzureManagedControlPlane{} | ||
controlPlaneRef := types.NamespacedName{ | ||
Name: cluster.Spec.ControlPlaneRef.Name, | ||
Namespace: cluster.Namespace, | ||
} | ||
|
||
log = log.WithValues("cluster", cluster.Name) | ||
|
||
if err := r.Get(ctx, controlPlaneRef, controlPlane); err != nil { | ||
return reconcile.Result{}, errors.Wrap(err, "failed to get control plane ref") | ||
} | ||
|
||
log = log.WithValues("controlPlane", controlPlaneRef.Name) | ||
|
||
patchhelper, err := patch.NewHelper(aksCluster, r.Client) | ||
if err != nil { | ||
return reconcile.Result{}, errors.Wrap(err, "failed to init patch helper") | ||
} | ||
|
||
// Match whatever the control plane says. We should also enqueue | ||
// requests from control plane to infra cluster to keep this accurate | ||
aksCluster.Status.Ready = controlPlane.Status.Ready | ||
aksCluster.Spec.ControlPlaneEndpoint = controlPlane.Spec.ControlPlaneEndpoint | ||
|
||
if err := patchhelper.Patch(ctx, aksCluster); err != nil { | ||
return reconcile.Result{}, err | ||
} | ||
|
||
log.Info("Successfully reconciled") | ||
|
||
return reconcile.Result{}, nil | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,161 @@ | ||
/* | ||
Copyright 2020 The Kubernetes Authors. | ||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
http://www.apache.org/licenses/LICENSE-2.0 | ||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. | ||
*/ | ||
|
||
package controllers | ||
|
||
import ( | ||
"context" | ||
|
||
"github.com/go-logr/logr" | ||
"github.com/pkg/errors" | ||
apierrors "k8s.io/apimachinery/pkg/api/errors" | ||
"k8s.io/client-go/tools/record" | ||
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3" | ||
"sigs.k8s.io/cluster-api-provider-azure/cloud/scope" | ||
infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3" | ||
|
||
"sigs.k8s.io/cluster-api/util" | ||
ctrl "sigs.k8s.io/controller-runtime" | ||
"sigs.k8s.io/controller-runtime/pkg/client" | ||
"sigs.k8s.io/controller-runtime/pkg/controller" | ||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" | ||
"sigs.k8s.io/controller-runtime/pkg/reconcile" | ||
) | ||
|
||
// AzureManagedMachinePoolReconciler reconciles an AzureManagedMachinePool object.
type AzureManagedMachinePoolReconciler struct {
	client.Client
	// Log is the base logger; Reconcile derives request-scoped loggers from it.
	Log logr.Logger
	// Recorder is an event recorder for emitting Kubernetes events.
	Recorder record.EventRecorder
}
|
||
func (r *AzureManagedMachinePoolReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { | ||
return ctrl.NewControllerManagedBy(mgr). | ||
WithOptions(options). | ||
For(&infrav1exp.AzureManagedMachinePool{}). | ||
Complete(r) | ||
} | ||
|
||
// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepools,verbs=get;list;watch;create;update;patch;delete | ||
// +kubebuilder:rbac:groups=exp.infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepools/status,verbs=get;update;patch | ||
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch;patch | ||
// +kubebuilder:rbac:groups=exp.cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch | ||
|
||
func (r *AzureManagedMachinePoolReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) { | ||
ctx := context.TODO() | ||
log := r.Log.WithValues("namespace", req.Namespace, "infraPool", req.Name) | ||
|
||
// Fetch the AzureManagedMachinePool instance | ||
infraPool := &infrav1exp.AzureManagedMachinePool{} | ||
err := r.Get(ctx, req.NamespacedName, infraPool) | ||
if err != nil { | ||
if apierrors.IsNotFound(err) { | ||
return reconcile.Result{}, nil | ||
} | ||
return reconcile.Result{}, err | ||
} | ||
|
||
// Fetch the owning MachinePool. | ||
ownerPool, err := getOwnerMachinePool(ctx, r.Client, infraPool.ObjectMeta) | ||
if err != nil { | ||
return reconcile.Result{}, err | ||
} | ||
if ownerPool == nil { | ||
log.Info("MachinePool Controller has not yet set OwnerRef") | ||
return reconcile.Result{}, nil | ||
} | ||
|
||
// Fetch the Cluster. | ||
ownerCluster, err := util.GetOwnerCluster(ctx, r.Client, ownerPool.ObjectMeta) | ||
if err != nil { | ||
return reconcile.Result{}, err | ||
} | ||
if ownerCluster == nil { | ||
log.Info("Cluster Controller has not yet set OwnerRef") | ||
return reconcile.Result{}, nil | ||
} | ||
|
||
log = log.WithValues("ownerCluster", ownerCluster.Name) | ||
|
||
// Fetch the corresponding control plane which has all the interesting data. | ||
controlPlane := &infrav1exp.AzureManagedControlPlane{} | ||
controlPlaneName := client.ObjectKey{ | ||
Namespace: ownerCluster.Spec.ControlPlaneRef.Namespace, | ||
Name: ownerCluster.Spec.ControlPlaneRef.Name, | ||
} | ||
if err := r.Client.Get(ctx, controlPlaneName, controlPlane); err != nil { | ||
return reconcile.Result{}, err | ||
} | ||
|
||
// Create the scope. | ||
mcpScope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ | ||
Client: r.Client, | ||
Logger: log, | ||
ControlPlane: controlPlane, | ||
Cluster: ownerCluster, | ||
MachinePool: ownerPool, | ||
InfraMachinePool: infraPool, | ||
PatchTarget: infraPool, | ||
}) | ||
if err != nil { | ||
return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err) | ||
} | ||
|
||
// Handle deleted clusters | ||
if !infraPool.DeletionTimestamp.IsZero() { | ||
return r.reconcileDelete(ctx, mcpScope) | ||
} | ||
|
||
// Handle non-deleted clusters | ||
return r.reconcileNormal(ctx, mcpScope) | ||
} | ||
|
||
func (r *AzureManagedMachinePoolReconciler) reconcileNormal(ctx context.Context, scope *scope.ManagedControlPlaneScope) (reconcile.Result, error) { | ||
scope.Logger.Info("Reconciling AzureManagedMachinePool") | ||
|
||
// If the AzureManagedMachinePool doesn't have our finalizer, add it. | ||
controllerutil.AddFinalizer(scope.InfraMachinePool, infrav1.ClusterFinalizer) | ||
// Register the finalizer immediately to avoid orphaning Azure resources on delete | ||
if err := scope.PatchObject(ctx); err != nil { | ||
return reconcile.Result{}, err | ||
} | ||
|
||
if err := newAzureManagedMachinePoolReconciler(scope).Reconcile(ctx, scope); err != nil { | ||
return reconcile.Result{}, errors.Wrapf(err, "error creating AzureManagedMachinePool %s/%s", scope.InfraMachinePool.Namespace, scope.InfraMachinePool.Name) | ||
} | ||
|
||
// No errors, so mark us ready so the Cluster API Cluster Controller can pull it | ||
scope.InfraMachinePool.Status.Ready = true | ||
|
||
return reconcile.Result{}, nil | ||
} | ||
|
||
func (r *AzureManagedMachinePoolReconciler) reconcileDelete(ctx context.Context, scope *scope.ManagedControlPlaneScope) (reconcile.Result, error) { | ||
scope.Logger.Info("Reconciling AzureManagedMachinePool delete") | ||
|
||
if err := newAzureManagedMachinePoolReconciler(scope).Delete(ctx, scope); err != nil { | ||
return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureManagedMachinePool %s/%s", scope.InfraMachinePool.Namespace, scope.InfraMachinePool.Name) | ||
} | ||
|
||
// Cluster is deleted so remove the finalizer. | ||
controllerutil.RemoveFinalizer(scope.InfraMachinePool, infrav1.ClusterFinalizer) | ||
|
||
if err := scope.PatchObject(ctx); err != nil { | ||
return reconcile.Result{}, err | ||
} | ||
|
||
return reconcile.Result{}, nil | ||
} |
Oops, something went wrong.