From a37f57dc72ecb2a6d68139eab1cfca985121fd9a Mon Sep 17 00:00:00 2001
From: Stefan Bueringer <buringerst@vmware.com>
Date: Wed, 21 Jun 2023 11:14:20 +0200
Subject: [PATCH] Use rest config from ClusterCacheTracker consistently
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stefan Büringer <buringerst@vmware.com>
---
 controlplane/kubeadm/internal/cluster.go       |  4 +++-
 controlplane/kubeadm/internal/cluster_test.go  | 19 +++++++++++--------
 .../controllers/machine/machine_controller.go  |  7 +------
 3 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/controlplane/kubeadm/internal/cluster.go b/controlplane/kubeadm/internal/cluster.go
index 60b71604cff1..521e2df2af10 100644
--- a/controlplane/kubeadm/internal/cluster.go
+++ b/controlplane/kubeadm/internal/cluster.go
@@ -25,6 +25,7 @@ import (
 
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/rest"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -102,10 +103,11 @@ func (m *Management) GetMachinePoolsForCluster(ctx context.Context, cluster *clu
 func (m *Management) GetWorkloadCluster(ctx context.Context, clusterKey client.ObjectKey) (WorkloadCluster, error) {
 	// TODO(chuckha): Inject this dependency.
 	// TODO(chuckha): memoize this function. The workload client only exists as long as a reconciliation loop.
-	restConfig, err := remote.RESTConfig(ctx, KubeadmControlPlaneControllerName, m.Client, clusterKey)
+	restConfig, err := m.Tracker.GetRESTConfig(ctx, clusterKey)
 	if err != nil {
 		return nil, err
 	}
+	restConfig = rest.CopyConfig(restConfig)
 	restConfig.Timeout = 30 * time.Second
 
 	if m.Tracker == nil {
diff --git a/controlplane/kubeadm/internal/cluster_test.go b/controlplane/kubeadm/internal/cluster_test.go
index efe89a367a58..8d36f7ef457d 100644
--- a/controlplane/kubeadm/internal/cluster_test.go
+++ b/controlplane/kubeadm/internal/cluster_test.go
@@ -105,13 +105,6 @@ func TestGetWorkloadCluster(t *testing.T) {
 	delete(emptyKeyEtcdSecret.Data, secret.TLSKeyDataName)
 	badCrtEtcdSecret := etcdSecret.DeepCopy()
 	badCrtEtcdSecret.Data[secret.TLSCrtDataName] = []byte("bad cert")
-	tracker, err := remote.NewClusterCacheTracker(
-		env.Manager,
-		remote.ClusterCacheTrackerOptions{
-			Log: &log.Log,
-		},
-	)
-	g.Expect(err).ToNot(HaveOccurred())
 
 	// Create kubeconfig secret
 	// Store the envtest config as the contents of the kubeconfig secret.
@@ -188,10 +181,20 @@ func TestGetWorkloadCluster(t *testing.T) {
 			for _, o := range tt.objs {
 				g.Expect(env.Client.Create(ctx, o)).To(Succeed())
 				defer func(do client.Object) {
-					g.Expect(env.Cleanup(ctx, do)).To(Succeed())
+					g.Expect(env.CleanupAndWait(ctx, do)).To(Succeed())
 				}(o)
 			}
 
+			// We have to create a new ClusterCacheTracker for every test case otherwise
+			// it could still have a rest config from a previous run cached.
+			tracker, err := remote.NewClusterCacheTracker(
+				env.Manager,
+				remote.ClusterCacheTrackerOptions{
+					Log: &log.Log,
+				},
+			)
+			g.Expect(err).ToNot(HaveOccurred())
+
 			// Note: The API reader is intentionally used instead of the regular (cached) client
 			// to avoid test failures when the local cache isn't able to catch up in time.
 			m := Management{
diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go
index 5b62bbb9a9d5..a38b22c82459 100644
--- a/internal/controllers/machine/machine_controller.go
+++ b/internal/controllers/machine/machine_controller.go
@@ -56,11 +56,6 @@ import (
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
 
-const (
-	// controllerName defines the controller used when creating clients.
-	controllerName = "machine-controller"
-)
-
 var (
 	errNilNodeRef           = errors.New("noderef is nil")
 	errLastControlPlaneNode = errors.New("last control plane member")
@@ -579,7 +574,7 @@ func (r *Reconciler) isDeleteNodeAllowed(ctx context.Context, cluster *clusterv1
 func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, nodeName string) (ctrl.Result, error) {
 	log := ctrl.LoggerFrom(ctx, "Node", klog.KRef("", nodeName))
 
-	restConfig, err := remote.RESTConfig(ctx, controllerName, r.Client, util.ObjectKey(cluster))
+	restConfig, err := r.Tracker.GetRESTConfig(ctx, util.ObjectKey(cluster))
 	if err != nil {
 		log.Error(err, "Error creating a remote client while deleting Machine, won't retry")
 		return ctrl.Result{}, nil
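
Note on the rest.CopyConfig call in GetWorkloadCluster: unlike the old
remote.RESTConfig helper, which built a fresh config from the kubeconfig
secret on every call, ClusterCacheTracker.GetRESTConfig hands back a pointer
to the config the tracker keeps cached, so setting the 30s timeout on it
directly would leak into every other consumer of that tracker. Below is a
minimal standalone sketch of that pointer-sharing hazard; the cached variable
and the main function are hypothetical stand-ins for the tracker's internal
cache, not code from this patch.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/rest"
)

func main() {
	// Stand-in for the *rest.Config a ClusterCacheTracker keeps cached
	// and returns from GetRESTConfig.
	cached := &rest.Config{Host: "https://workload-cluster:6443"}

	// Mutating the returned pointer directly poisons the shared cache.
	leaked := cached
	leaked.Timeout = 30 * time.Second
	fmt.Println(cached.Timeout) // 30s: the cached config was mutated

	// What the patch does instead: copy first, then mutate only the copy.
	cached.Timeout = 0
	isolated := rest.CopyConfig(cached)
	isolated.Timeout = 30 * time.Second
	fmt.Println(cached.Timeout, isolated.Timeout) // 0s 30s: cache untouched
}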