From a21b1fb23f57b3ebeab4cf7c20393fbaf6c1efc5 Mon Sep 17 00:00:00 2001
From: Fabio Rapposelli
Date: Wed, 18 Mar 2020 16:01:02 +0100
Subject: [PATCH] :bug: KCP should set a timeout when trying to connect to
 remote cluster

Signed-off-by: Vince Prignano
---
 .../controllers/kubeadm_control_plane_controller.go | 7 +++++++
 controlplane/kubeadm/internal/cluster.go            | 8 +++++---
 2 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go b/controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go
index a3745818b3e3..c0790e08d97b 100644
--- a/controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go
+++ b/controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go
@@ -170,6 +170,7 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(req ctrl.Request) (res ctrl.Re
 				reterr = nil
 			}
 		}
+
 		// Always attempt to update status.
 		if err := r.updateStatus(ctx, kcp, cluster); err != nil {
 			logger.Error(err, "Failed to update KubeadmControlPlane Status")
@@ -181,6 +182,7 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(req ctrl.Request) (res ctrl.Re
 			logger.Error(err, "Failed to patch KubeadmControlPlane")
 			reterr = kerrors.NewAggregate([]error{reterr, err})
 		}
+
 	}()
 
 	if !kcp.ObjectMeta.DeletionTimestamp.IsZero() {
@@ -323,6 +325,11 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c
 	kcp.Status.ReadyReplicas = 0
 	kcp.Status.UnavailableReplicas = replicas
 
+	// Return early if the deletion timestamp is set; we don't want to try to connect to the workload cluster.
+	if !kcp.DeletionTimestamp.IsZero() {
+		return nil
+	}
+
 	workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(cluster))
 	if err != nil {
 		return errors.Wrap(err, "failed to create remote cluster client")
diff --git a/controlplane/kubeadm/internal/cluster.go b/controlplane/kubeadm/internal/cluster.go
index e0eea2e20fd5..330ac61f1c98 100644
--- a/controlplane/kubeadm/internal/cluster.go
+++ b/controlplane/kubeadm/internal/cluster.go
@@ -21,6 +21,7 @@ import (
 	"crypto/tls"
 	"crypto/x509"
 	"fmt"
+	"time"
 
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
@@ -66,18 +67,19 @@ func (m *Management) GetMachinesForCluster(ctx context.Context, cluster client.O
 
 // GetWorkloadCluster builds a cluster object.
 // The cluster comes with an etcd client generator to connect to any etcd pod living on a managed machine.
 func (m *Management) GetWorkloadCluster(ctx context.Context, clusterKey client.ObjectKey) (WorkloadCluster, error) {
-	// TODO(chuckha): Unroll remote.NewClusterClient if we are unhappy with getting a restConfig twice.
 	// TODO(chuckha): Inject this dependency.
 	// TODO(chuckha): memoize this function. The workload client only exists as long as a reconciliation loop.
 	restConfig, err := remote.RESTConfig(ctx, m.Client, clusterKey)
 	if err != nil {
 		return nil, err
 	}
-	c, err := remote.NewClusterClient(ctx, m.Client, clusterKey, scheme.Scheme)
+	restConfig.Timeout = 30 * time.Second
+	c, err := client.New(restConfig, client.Options{Scheme: scheme.Scheme})
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrapf(err, "failed to create client for workload cluster %v", clusterKey)
 	}
+
 	etcdCASecret := &corev1.Secret{}
 	etcdCAObjectKey := ctrlclient.ObjectKey{
 		Namespace: clusterKey.Namespace,
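
Illustration, not part of the commit: a minimal sketch of the pattern the cluster.go hunk applies, namely setting Timeout on a client-go rest.Config before handing it to controller-runtime's client.New, so that every request to an unreachable workload cluster fails after 30 seconds instead of hanging the reconcile loop. The kubeconfig path and the trailing Get call are assumptions for the demo; in KCP the rest.Config comes from remote.RESTConfig, which builds it from a kubeconfig Secret on the management cluster.

package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/clientcmd"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	// Hypothetical kubeconfig path for the workload cluster; Cluster API
	// derives this config from a Secret rather than a file on disk.
	restConfig, err := clientcmd.BuildConfigFromFlags("", "workload.kubeconfig")
	if err != nil {
		panic(err)
	}

	// The crux of the patch: bound every HTTP request made through this
	// config. Without it, a dead API server can block the caller forever.
	restConfig.Timeout = 30 * time.Second

	// Build the controller-runtime client straight from the rest.Config,
	// mirroring the unrolled remote.NewClusterClient in the patch.
	c, err := client.New(restConfig, client.Options{Scheme: scheme.Scheme})
	if err != nil {
		panic(err)
	}

	// Any call through c now fails with a timeout error after at most 30s.
	var svc corev1.Service
	err = c.Get(context.Background(), client.ObjectKey{Namespace: "default", Name: "kubernetes"}, &svc)
	fmt.Println("Get returned:", err)
}

A rest.Config-level Timeout is coarser than a per-call context deadline, but it also covers the discovery requests client.New itself issues while building its REST mapper, which is where a reconcile loop can stall before any Get or List is ever made.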