Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

🌱 Improve dry run for server side apply using an annotation #6709

Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions api/v1beta1/common_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,10 @@ const (
// Deprecated: Topology controller is now using server side apply and this annotation will be removed in a future release.
ClusterTopologyManagedFieldsAnnotation = "topology.cluster.x-k8s.io/managed-field-paths"

// ClusterTopologyLastAppliedIntentAnnotation is the annotation used to store the last-applied-intent
// by the topology controller.
ClusterTopologyLastAppliedIntentAnnotation = "topology.cluster.x-k8s.io/last-applied-intent"

// ClusterTopologyMachineDeploymentLabelName is the label set on the generated MachineDeployment objects
// to track the name of the MachineDeployment topology it represents.
ClusterTopologyMachineDeploymentLabelName = "topology.cluster.x-k8s.io/deployment-name"
Expand Down
38 changes: 0 additions & 38 deletions cmd/clusterctl/client/cluster/mover.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@ limitations under the License.
package cluster

import (
"context"
"fmt"
"os"
"path/filepath"
Expand All @@ -35,7 +34,6 @@ import (

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log"
"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
Expand Down Expand Up @@ -853,9 +851,6 @@ func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy) erro
// Rebuild the owner reference chain
o.buildOwnerChain(obj, nodeToCreate)

// Save the old managed fields for topology managed fields migration
oldManagedFields := obj.GetManagedFields()

// FIXME Workaround for https://github.com/kubernetes/kubernetes/issues/32220. Remove when the issue is fixed.
// If the resource already exists, the API server ordinarily returns an AlreadyExists error. Due to the above issue, if the resource has a non-empty metadata.generateName field, the API server returns a ServerTimeoutError. To ensure that the API server returns an AlreadyExists error, we set the metadata.generateName field to an empty string.
if len(obj.GetName()) > 0 && len(obj.GetGenerateName()) > 0 {
Expand Down Expand Up @@ -902,10 +897,6 @@ func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy) erro
// Stores the newUID assigned to the newly created object.
nodeToCreate.newUID = obj.GetUID()

if err := patchTopologyManagedFields(ctx, oldManagedFields, obj, cTo); err != nil {
return err
}

return nil
}

Expand Down Expand Up @@ -1173,32 +1164,3 @@ func (o *objectMover) checkTargetProviders(toInventory InventoryClient) error {

return kerrors.NewAggregate(errList)
}

// patchTopologyManagedFields patches the managed fields of obj if parts of it are owned by the topology controller.
// This is necessary to ensure the managed fields created by the topology controller are still present and thus to
// prevent unnecessary machine rollouts. Without patching the managed fields, clusterctl would be the owner of the fields
// which would lead to co-ownership and also additional machine rollouts.
func patchTopologyManagedFields(ctx context.Context, oldManagedFields []metav1.ManagedFieldsEntry, obj *unstructured.Unstructured, cTo client.Client) error {
	// Detect whether any of the previous managed field entries was applied by
	// the topology controller (apply operation on the main resource).
	ownedByTopologyController := false
	for _, entry := range oldManagedFields {
		if entry.Manager == structuredmerge.TopologyManagerName &&
			entry.Operation == metav1.ManagedFieldsOperationApply &&
			entry.Subresource == "" {
			ownedByTopologyController = true
			break
		}
	}

	// Nothing to do if the topology controller never owned fields on this object.
	if !ownedByTopologyController {
		return nil
	}

	// Restore the old managed fields on the object and send a merge patch
	// computed against a deep copy taken before the mutation.
	base := obj.DeepCopy()
	obj.SetManagedFields(oldManagedFields)

	if err := cTo.Patch(ctx, obj, client.MergeFrom(base)); err != nil {
		return errors.Wrapf(err, "error patching managed fields %q %s/%s",
			obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
	}
	return nil
}
14 changes: 14 additions & 0 deletions internal/contract/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,20 @@ func (p Path) Append(k string) Path {
return append(p, k)
}

// Item returns a path for an item element, represented as <path> + []. Item elements are used as
// a placeholder for schema definitions for list items or for map items.
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Just that I get it right:

The output of this func for the Path

"spec", "template", "spec"

is a Path

"spec", "template", "spec[]"

I somehow can't map the method name to the functionality. Isn't it rather something like ToArray or AsArray??

Path{"a","b","c"}.ToArray() => Path{"a","b","c[]"}

Should we add a unit test?

func (p Path) Item() Path {
	// Copy every element of the path, suffixing only the last one with "[]"
	// to denote the placeholder used for list/map item schema definitions.
	item := make(Path, 0, len(p))
	if len(p) > 0 {
		item = append(item, p[:len(p)-1]...)
		item = append(item, p[len(p)-1]+"[]")
	}
	return item
}

// IsParentOf check if one path is Parent of the other.
func (p Path) IsParentOf(other Path) bool {
if len(p) >= len(other) {
Expand Down
12 changes: 7 additions & 5 deletions internal/controllers/topology/cluster/cluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ import (
"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches"
"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge/diff"
runtimecatalog "sigs.k8s.io/cluster-api/internal/runtime/catalog"
runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
"sigs.k8s.io/cluster-api/util"
Expand Down Expand Up @@ -114,7 +115,8 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
r.patchEngine = patches.NewEngine(r.RuntimeClient)
r.recorder = mgr.GetEventRecorderFor("topology/cluster")
if r.patchHelperFactory == nil {
r.patchHelperFactory = serverSideApplyPatchHelperFactory(r.Client)
crdSchemaCache := diff.NewCRDSchemaCache(r.Client)
r.patchHelperFactory = serverSideApplyPatchHelperFactory(r.Client, crdSchemaCache)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: Should we just do diff.NewCRDSchemaCache(r.Client) in serverSideApplyPatchHelperFactory? The func already has the client and seems like a good way to avoid having to create the CRDSchemaCache on all callsites

}
return nil
}
Expand Down Expand Up @@ -338,15 +340,15 @@ func (r *Reconciler) machineDeploymentToCluster(o client.Object) []ctrl.Request
}

// serverSideApplyPatchHelperFactory makes use of managed fields provided by server side apply and is used by the controller.
func serverSideApplyPatchHelperFactory(c client.Client) structuredmerge.PatchHelperFactoryFunc {
return func(original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
return structuredmerge.NewServerSidePatchHelper(original, modified, c, opts...)
func serverSideApplyPatchHelperFactory(c client.Client, crdSchemaCache diff.CRDSchemaCache) structuredmerge.PatchHelperFactoryFunc {
return func(ctx context.Context, original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
return structuredmerge.NewServerSidePatchHelper(ctx, original, modified, c, crdSchemaCache, opts...)
}
}

// dryRunPatchHelperFactory makes use of a two-ways patch and is used in situations where we cannot rely on managed fields.
func dryRunPatchHelperFactory(c client.Client) structuredmerge.PatchHelperFactoryFunc {
return func(original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
return func(ctx context.Context, original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
return func(ctx context.Context, original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
return func(_ context.Context, original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {

nit

return structuredmerge.NewTwoWaysPatchHelper(original, modified, c, opts...)
}
}
22 changes: 11 additions & 11 deletions internal/controllers/topology/cluster/reconcile_state.go
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ func (r *Reconciler) reconcileClusterShim(ctx context.Context, s *scope.Scope) e
// Note: we are using Patch instead of create for ensuring consistency in managedFields for the entire controller
// but in this case it isn't strictly necessary given that we are not using server side apply for modifying the
// object afterwards.
helper, err := r.patchHelperFactory(nil, shim)
helper, err := r.patchHelperFactory(ctx, nil, shim)
if err != nil {
return errors.Wrap(err, "failed to create the patch helper for the cluster shim object")
}
Expand Down Expand Up @@ -385,7 +385,7 @@ func (r *Reconciler) reconcileMachineHealthCheck(ctx context.Context, current, d
// If a current MachineHealthCheck doesn't exist but there is a desired MachineHealthCheck attempt to create.
if current == nil && desired != nil {
log.Infof("Creating %s", tlog.KObj{Obj: desired})
helper, err := r.patchHelperFactory(nil, desired)
helper, err := r.patchHelperFactory(ctx, nil, desired)
if err != nil {
return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: desired})
}
Expand Down Expand Up @@ -414,7 +414,7 @@ func (r *Reconciler) reconcileMachineHealthCheck(ctx context.Context, current, d
// Check differences between current and desired MachineHealthChecks, and patch if required.
// NOTE: we want to be authoritative on the entire spec because the users are
// expected to change MHC fields from the ClusterClass only.
patchHelper, err := r.patchHelperFactory(current, desired)
patchHelper, err := r.patchHelperFactory(ctx, current, desired)
if err != nil {
return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: current})
}
Expand All @@ -439,7 +439,7 @@ func (r *Reconciler) reconcileCluster(ctx context.Context, s *scope.Scope) error
ctx, log := tlog.LoggerFrom(ctx).WithObject(s.Desired.Cluster).Into(ctx)

// Check differences between current and desired state, and eventually patch the current object.
patchHelper, err := r.patchHelperFactory(s.Current.Cluster, s.Desired.Cluster, structuredmerge.IgnorePaths{
patchHelper, err := r.patchHelperFactory(ctx, s.Current.Cluster, s.Desired.Cluster, structuredmerge.IgnorePaths{
{"spec", "controlPlaneEndpoint"}, // this is a well known field that is managed by the Cluster controller, topology should not express opinions on it.
})
if err != nil {
Expand Down Expand Up @@ -511,7 +511,7 @@ func (r *Reconciler) createMachineDeployment(ctx context.Context, cluster *clust

log = log.WithObject(md.Object)
log.Infof(fmt.Sprintf("Creating %s", tlog.KObj{Obj: md.Object}))
helper, err := r.patchHelperFactory(nil, md.Object)
helper, err := r.patchHelperFactory(ctx, nil, md.Object)
if err != nil {
return createErrorWithoutObjectName(err, md.Object)
}
Expand Down Expand Up @@ -566,7 +566,7 @@ func (r *Reconciler) updateMachineDeployment(ctx context.Context, cluster *clust

// Check differences between current and desired MachineDeployment, and eventually patch the current object.
log = log.WithObject(desiredMD.Object)
patchHelper, err := r.patchHelperFactory(currentMD.Object, desiredMD.Object)
patchHelper, err := r.patchHelperFactory(ctx, currentMD.Object, desiredMD.Object)
if err != nil {
return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: currentMD.Object})
}
Expand Down Expand Up @@ -659,7 +659,7 @@ func (r *Reconciler) reconcileReferencedObject(ctx context.Context, in reconcile
// If there is no current object, create it.
if in.current == nil {
log.Infof("Creating %s", tlog.KObj{Obj: in.desired})
helper, err := r.patchHelperFactory(nil, in.desired)
helper, err := r.patchHelperFactory(ctx, nil, in.desired)
if err != nil {
return errors.Wrap(createErrorWithoutObjectName(err, in.desired), "failed to create patch helper")
}
Expand All @@ -676,7 +676,7 @@ func (r *Reconciler) reconcileReferencedObject(ctx context.Context, in reconcile
}

// Check differences between current and desired state, and eventually patch the current object.
patchHelper, err := r.patchHelperFactory(in.current, in.desired, structuredmerge.IgnorePaths(in.ignorePaths))
patchHelper, err := r.patchHelperFactory(ctx, in.current, in.desired, structuredmerge.IgnorePaths(in.ignorePaths))
if err != nil {
return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: in.current})
}
Expand Down Expand Up @@ -736,7 +736,7 @@ func (r *Reconciler) reconcileReferencedTemplate(ctx context.Context, in reconci
// If there is no current object, create the desired object.
if in.current == nil {
log.Infof("Creating %s", tlog.KObj{Obj: in.desired})
helper, err := r.patchHelperFactory(nil, in.desired)
helper, err := r.patchHelperFactory(ctx, nil, in.desired)
if err != nil {
return errors.Wrap(createErrorWithoutObjectName(err, in.desired), "failed to create patch helper")
}
Expand All @@ -757,7 +757,7 @@ func (r *Reconciler) reconcileReferencedTemplate(ctx context.Context, in reconci
}

// Check differences between current and desired objects, and if there are changes eventually start the template rotation.
patchHelper, err := r.patchHelperFactory(in.current, in.desired)
patchHelper, err := r.patchHelperFactory(ctx, in.current, in.desired)
if err != nil {
return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: in.current})
}
Expand Down Expand Up @@ -788,7 +788,7 @@ func (r *Reconciler) reconcileReferencedTemplate(ctx context.Context, in reconci

log.Infof("Rotating %s, new name %s", tlog.KObj{Obj: in.current}, newName)
log.Infof("Creating %s", tlog.KObj{Obj: in.desired})
helper, err := r.patchHelperFactory(nil, in.desired)
helper, err := r.patchHelperFactory(ctx, nil, in.desired)
if err != nil {
return errors.Wrap(createErrorWithoutObjectName(err, in.desired), "failed to create patch helper")
}
Expand Down
Loading