Skip to content

Commit

Permalink
with 'ok-to-delete' annotation
Browse files Browse the repository at this point in the history
  • Loading branch information
Yuvaraj Kakaraparthi committed Jun 28, 2022
1 parent 8b25477 commit f76e203
Show file tree
Hide file tree
Showing 10 changed files with 340 additions and 162 deletions.
3 changes: 0 additions & 3 deletions controllers/alias.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,6 @@ type ClusterReconciler struct {
Client client.Client
APIReader client.Reader

RuntimeClient runtimeclient.Client

// WatchFilterValue is the label value used to filter events prior to reconciliation.
WatchFilterValue string
}
Expand All @@ -53,7 +51,6 @@ func (r *ClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manag
return (&clustercontroller.Reconciler{
Client: r.Client,
APIReader: r.APIReader,
RuntimeClient: r.RuntimeClient,
WatchFilterValue: r.WatchFilterValue,
}).SetupWithManager(ctx, mgr, options)
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,8 @@ import (

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/internal/hooks"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/predicates"
)
Expand Down Expand Up @@ -91,6 +93,12 @@ func (r *ClusterResourceSetBindingReconciler) Reconcile(ctx context.Context, req
}
// If the owner cluster is in deletion process, delete its ClusterResourceSetBinding
if !cluster.DeletionTimestamp.IsZero() {
if feature.Gates.Enabled(feature.RuntimeSDK) && feature.Gates.Enabled(feature.ClusterTopology) {
if cluster.Spec.Topology != nil && !hooks.IsOkToDelete(cluster) {
// If the Cluster is not yet ready to be deleted then do no delete the ClusterResourceSetBinding.
return ctrl.Result{}, nil
}
}
log.Info("deleting ClusterResourceSetBinding because the owner Cluster is currently being deleted")
return ctrl.Result{}, r.Client.Delete(ctx, binding)
}
Expand Down
4 changes: 4 additions & 0 deletions exp/runtime/api/v1alpha1/extensionconfig_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -210,4 +210,8 @@ const (
// The annotation will be used to track the intent to call a hook as soon as an operation completes;
// the intent will be removed as soon as the hook call completes successfully.
PendingHooksAnnotation string = "runtime.cluster.x-k8s.io/pending-hooks"

// OkToDeleteAnnotation is the annotation used to indicate if a cluster is ready to be fully deleted.
// This annotation is added to the cluster after the hooks (Ex: BeforeClusterDelete) have passed.
OkToDeleteAnnotation string = "runtime.cluster.x-k8s.io/ok-to-delete"
)
29 changes: 6 additions & 23 deletions internal/controllers/cluster/cluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,11 +40,8 @@ import (
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/controllers/external"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/internal/hooks"
runtimecatalog "sigs.k8s.io/cluster-api/internal/runtime/catalog"
runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/collections"
Expand All @@ -71,8 +68,6 @@ type Reconciler struct {
Client client.Client
APIReader client.Reader

RuntimeClient runtimeclient.Client

// WatchFilterValue is the label value used to filter events prior to reconciliation.
WatchFilterValue string

Expand Down Expand Up @@ -221,24 +216,12 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster)
func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)

// Call the BeforeClusterDelete hook to before proceeding further.
if feature.Gates.Enabled(feature.RuntimeSDK) {
// CAll the hook only ifi it is not marked already. If it is already marked it would mean don't need to call it anymore.
if cluster.Spec.Topology != nil && !hooks.IsPending(runtimehooksv1.BeforeClusterDelete, cluster) {
hookRequest := &runtimehooksv1.BeforeClusterDeleteRequest{
Cluster: *cluster,
}
hookResponse := &runtimehooksv1.BeforeClusterDeleteResponse{}
if err := r.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.BeforeClusterDelete, cluster, hookRequest, hookResponse); err != nil {
return ctrl.Result{}, errors.Wrapf(err, "error calling the %s hook", runtimecatalog.HookName(runtimehooksv1.BeforeClusterDelete))
}
if hookResponse.RetryAfterSeconds != 0 {
// Cannot proceed with deleting the cluster yet. Lets requeue to retry at a later time.
return ctrl.Result{RequeueAfter: time.Duration(hookResponse.RetryAfterSeconds) * time.Second}, nil
}
// We can proceed with the delete operation. Mark the hook so that we don't call it anymore for this Cluster.
if err := hooks.MarkAsPending(ctx, r.Client, cluster, runtimehooksv1.BeforeClusterDelete); err != nil {
return ctrl.Result{}, errors.Wrapf(err, "failed to mark %s hook", runtimecatalog.HookName(runtimehooksv1.BeforeClusterDelete))
// If the RuntimeSDK and ClusterTopology flags are enabled, for clusters with managed topologies
// only proceed with delete if the cluster is marked as `ok-to-delete`
if feature.Gates.Enabled(feature.RuntimeSDK) && feature.Gates.Enabled(feature.ClusterTopology) {
if cluster.Spec.Topology != nil {
if !hooks.IsOkToDelete(cluster) {
return ctrl.Result{}, nil
}
}
}
Expand Down
162 changes: 28 additions & 134 deletions internal/controllers/cluster/cluster_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,10 @@ package cluster

import (
"testing"
"time"

. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/component-base/featuregate/testing"
"k8s.io/utils/pointer"
Expand All @@ -31,11 +31,9 @@ import (

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/internal/hooks"
runtimecatalog "sigs.k8s.io/cluster-api/internal/runtime/catalog"
fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake"
"sigs.k8s.io/cluster-api/internal/test/builder"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
Expand Down Expand Up @@ -358,151 +356,47 @@ func TestClusterReconciler(t *testing.T) {

func TestClusterReconciler_reconcileDelete(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()

catalog := runtimecatalog.New()
_ = runtimehooksv1.AddToCatalog(catalog)

beforeClusterDeleteGVH, err := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterDelete)
if err != nil {
panic(err)
}

blockingResponse := &runtimehooksv1.BeforeClusterDeleteResponse{
CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
RetryAfterSeconds: int32(10),
CommonResponse: runtimehooksv1.CommonResponse{
Status: runtimehooksv1.ResponseStatusSuccess,
},
},
}
nonBlockingResponse := &runtimehooksv1.BeforeClusterDeleteResponse{
CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
RetryAfterSeconds: int32(0),
CommonResponse: runtimehooksv1.CommonResponse{
Status: runtimehooksv1.ResponseStatusSuccess,
},
},
}
failureResponse := &runtimehooksv1.BeforeClusterDeleteResponse{
CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
CommonResponse: runtimehooksv1.CommonResponse{
Status: runtimehooksv1.ResponseStatusFailure,
},
},
}
fakeInfraCluster := builder.InfrastructureCluster("test-ns", "test-cluster").Build()

tests := []struct {
name string
cluster *clusterv1.Cluster
hookResponse *runtimehooksv1.BeforeClusterDeleteResponse
wantHookToBeCalled bool
wantResult ctrl.Result
wantMarked bool
wantErr bool
name string
cluster *clusterv1.Cluster
wantDelete bool
}{
{
name: "should succeed if the BeforeClusterDelete hook returns a non-blocking response",
cluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
Namespace: "test-ns",
},
Spec: clusterv1.ClusterSpec{
Topology: &clusterv1.Topology{},
},
},
hookResponse: nonBlockingResponse,
wantResult: ctrl.Result{},
wantHookToBeCalled: true,
wantMarked: true,
wantErr: false,
},
{
name: "should requeue if the BeforeClusterDelete hook returns a blocking response",
cluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
Namespace: "test-ns",
},
Spec: clusterv1.ClusterSpec{
Topology: &clusterv1.Topology{},
},
},
hookResponse: blockingResponse,
wantResult: ctrl.Result{RequeueAfter: time.Duration(10) * time.Second},
wantHookToBeCalled: true,
wantMarked: false,
wantErr: false,
},
{
name: "should fail if the BeforeClusterDelete hook returns a failure response",
cluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
Namespace: "test-ns",
},
Spec: clusterv1.ClusterSpec{
Topology: &clusterv1.Topology{},
},
},
hookResponse: failureResponse,
wantResult: ctrl.Result{},
wantHookToBeCalled: true,
wantMarked: false,
wantErr: true,
name: "should proceed with delete if the cluster has the ok-to-delete annotation",
cluster: func() *clusterv1.Cluster {
fakeCluster := builder.Cluster("test-ns", "test-cluster").WithTopology(&clusterv1.Topology{}).WithInfrastructureCluster(fakeInfraCluster).Build()
if fakeCluster.Annotations == nil {
fakeCluster.Annotations = map[string]string{}
}
fakeCluster.Annotations[runtimev1.OkToDeleteAnnotation] = ""
return fakeCluster
}(),
wantDelete: true,
},
{
name: "should succeed if the hook is already called",
cluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
Namespace: "test-ns",
Annotations: map[string]string{
// If the hook is already marked the hook should not be called during cluster delete.
runtimehooksv1.PendingHooksAnnotation: "BeforeClusterDelete",
},
},
Spec: clusterv1.ClusterSpec{
Topology: &clusterv1.Topology{},
},
},
// Using a blocking response here should not matter as the hook should never be called.
// Using a blocking response to enforce the point.
hookResponse: blockingResponse,
wantResult: ctrl.Result{},
wantHookToBeCalled: false,
wantMarked: true,
wantErr: false,
name: "should not proceed with delete if the cluster does not have the ok-to-delete annotation",
cluster: builder.Cluster("test-ns", "test-cluster").WithTopology(&clusterv1.Topology{}).WithInfrastructureCluster(fakeInfraCluster).Build(),
wantDelete: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)

fakeClient := fake.NewClientBuilder().WithObjects(tt.cluster).Build()
fakeRuntimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
beforeClusterDeleteGVH: tt.hookResponse,
}).
WithCatalog(catalog).
Build()

fakeClient := fake.NewClientBuilder().WithObjects(fakeInfraCluster, tt.cluster).Build()
r := &Reconciler{
Client: fakeClient,
APIReader: fakeClient,
RuntimeClient: fakeRuntimeClient,
Client: fakeClient,
APIReader: fakeClient,
}

res, err := r.reconcileDelete(ctx, tt.cluster)
if tt.wantErr {
g.Expect(err).NotTo(BeNil())
} else {
g.Expect(err).To(BeNil())
g.Expect(res).To(Equal(tt.wantResult))
g.Expect(hooks.IsPending(runtimehooksv1.BeforeClusterDelete, tt.cluster)).To(Equal(tt.wantMarked))
g.Expect(fakeRuntimeClient.CallAllCount(runtimehooksv1.BeforeClusterDelete) == 1).To(Equal(tt.wantHookToBeCalled))
}
_, _ = r.reconcileDelete(ctx, tt.cluster)
infraCluster := builder.InfrastructureCluster("", "").Build()
err := fakeClient.Get(ctx, client.ObjectKeyFromObject(fakeInfraCluster), infraCluster)
g.Expect(apierrors.IsNotFound(err)).To(Equal(tt.wantDelete))
})
}
}
Expand Down
28 changes: 27 additions & 1 deletion internal/controllers/topology/cluster/cluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ import (
"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches"
"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
"sigs.k8s.io/cluster-api/internal/hooks"
runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
Expand Down Expand Up @@ -164,7 +165,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
if !cluster.ObjectMeta.DeletionTimestamp.IsZero() {
// TODO: When external patching is supported, we should handle the deletion
// of those external CRDs we created.
return ctrl.Result{}, nil
return r.reconcileDelete(ctx, cluster)
}

patchHelper, err := patch.NewHelper(cluster, r.Client)
Expand Down Expand Up @@ -336,6 +337,31 @@ func (r *Reconciler) machineDeploymentToCluster(o client.Object) []ctrl.Request
}}
}

// reconcileDelete handles the deletion flow for a Cluster with a managed topology.
// It calls the BeforeClusterDelete runtime hook (when the RuntimeSDK feature gate is
// enabled) and, once the hook returns a successful non-blocking response, marks the
// Cluster with the `ok-to-delete` annotation. Adding this annotation will trigger the
// cluster reconciler to proceed with deleting the cluster.
// Returns a requeue result while the hook is still blocking the deletion.
func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster) (ctrl.Result, error) {
	if feature.Gates.Enabled(feature.RuntimeSDK) {
		// Only call the hook if the Cluster is not already marked; once marked,
		// the hook has run to completion and must not be called again.
		if !hooks.IsOkToDelete(cluster) {
			hookRequest := &runtimehooksv1.BeforeClusterDeleteRequest{
				Cluster: *cluster,
			}
			hookResponse := &runtimehooksv1.BeforeClusterDeleteResponse{}
			if err := r.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.BeforeClusterDelete, cluster, hookRequest, hookResponse); err != nil {
				// Wrap the error for context, consistent with the MarkAsOkToDelete error below.
				return ctrl.Result{}, errors.Wrap(err, "error calling the BeforeClusterDelete hook")
			}
			if hookResponse.RetryAfterSeconds != 0 {
				// The hook is blocking the deletion; requeue to retry at a later time.
				return ctrl.Result{RequeueAfter: time.Duration(hookResponse.RetryAfterSeconds) * time.Second}, nil
			}
			// The BeforeClusterDelete hook returned a non-blocking response. Now the cluster is ready to be deleted.
			// Let's mark the cluster as `ok-to-delete`.
			if err := hooks.MarkAsOkToDelete(ctx, r.Client, cluster); err != nil {
				return ctrl.Result{}, errors.Wrapf(err, "failed to mark %s/%s cluster as ok to delete", cluster.Namespace, cluster.Name)
			}
		}
	}
	return ctrl.Result{}, nil
}

// serverSideApplyPatchHelperFactory makes use of managed fields provided by server side apply and is used by the controller.
func serverSideApplyPatchHelperFactory(c client.Client) structuredmerge.PatchHelperFactoryFunc {
return func(original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
Expand Down
Loading

0 comments on commit f76e203

Please sign in to comment.