diff --git a/pkg/controller/poolcoordinator/cert/certificate.go b/pkg/controller/poolcoordinator/cert/certificate.go
index 5beb1d3aefe..eb3fdad2a40 100644
--- a/pkg/controller/poolcoordinator/cert/certificate.go
+++ b/pkg/controller/poolcoordinator/cert/certificate.go
@@ -151,8 +151,10 @@ func loadCertAndKeyFromSecret(clientSet client.Interface, certConf CertConfig) (
 		if err != nil {
 			return nil, nil, errors.Wrapf(err, "couldn't parse the kubeconfig file in the %s secret", secretName)
 		}
-		authInfo := kubeConfig.AuthInfos[certConf.CommonName]
-
+		authInfo := kubeconfig.GetAuthInfoFromKubeConfig(kubeConfig)
+		if authInfo == nil {
+			return nil, nil, errors.Errorf("auth info is not found in secret(%s)", secretName)
+		}
 		certBytes = authInfo.ClientCertificateData
 		keyBytes = authInfo.ClientKeyData
 	} else {
@@ -293,7 +295,7 @@ func GetPrivateKeyFromTLSCert(cert *tls.Certificate) (keyPEM []byte, err error)
 	return keyutil.MarshalPrivateKeyToPEM(cert.PrivateKey)
 }
 
-// get certificate & private key (in PEM format) from certmanager
+// GetCertAndKeyFromCertMgr will get certificate & private key (in PEM format) from certmanager
 func GetCertAndKeyFromCertMgr(certManager certificate.Manager, stopCh <-chan struct{}) (key []byte, cert []byte, err error) {
 	// waiting for the certificate is generated
 	certManager.Start()
@@ -327,7 +329,7 @@ func GetCertAndKeyFromCertMgr(certManager certificate.Manager, stopCh <-chan str
 	return
 }
 
-// write cert&key pair generated from certManager into a secret
+// WriteCertIntoSecret will write cert&key pair generated from certManager into a secret
 func WriteCertIntoSecret(clientSet client.Interface, certName, secretName string, certManager certificate.Manager, stopCh <-chan struct{}) error {
 	keyPEM, certPEM, err := GetCertAndKeyFromCertMgr(certManager, stopCh)
@@ -354,7 +356,7 @@ func WriteCertIntoSecret(clientSet client.Interface, certName, secretName string
 	return nil
 }
 
-// write cert&key into secret
+// WriteCertAndKeyIntoSecret is used for writing cert&key into secret
 // Notice: if cert OR key is nil, it will be ignored
 func WriteCertAndKeyIntoSecret(clientSet client.Interface, certName, secretName string, cert *x509.Certificate, key crypto.Signer) error {
 	// write certificate data into secret
diff --git a/pkg/controller/poolcoordinator/cert/certificate_test.go b/pkg/controller/poolcoordinator/cert/certificate_test.go
index 1f61f2f3073..546111d45b6 100644
--- a/pkg/controller/poolcoordinator/cert/certificate_test.go
+++ b/pkg/controller/poolcoordinator/cert/certificate_test.go
@@ -229,7 +229,7 @@ contexts:
 - context:
     cluster: cluster
     user: openyurt:pool-coordinator:monitoring
-  name: openyurt:pool-coordinator:monitoring@cluster
+  name: openyurt:pool-coordinator:monitoring@cluster
 current-context: openyurt:pool-coordinator:monitoring@cluster
 kind: Config
 users:
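Why the lookup above had to change: kubeConfig.AuthInfos is keyed by the kubeconfig's user-entry name, which only coincidentally matches the certificate CommonName. A minimal standalone sketch of the two lookups (illustrative only, not part of this patch; assumes k8s.io/client-go is available):

package main

import (
	"fmt"

	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	// a user entry named after the context's user, not after the cert CommonName
	cfg := clientcmdapi.NewConfig()
	cfg.AuthInfos["monitoring-user"] = &clientcmdapi.AuthInfo{ClientCertificateData: []byte("cert pem")}
	cfg.Contexts["ctx"] = &clientcmdapi.Context{Cluster: "cluster", AuthInfo: "monitoring-user"}
	cfg.CurrentContext = "ctx"

	// old behavior: indexing AuthInfos by CommonName can miss the entry entirely
	fmt.Println(cfg.AuthInfos["openyurt:pool-coordinator:monitoring"] == nil) // true

	// new behavior: resolve through the current context, as the
	// kubeconfig.GetAuthInfoFromKubeConfig helper added at the end of this diff does
	fmt.Println(cfg.AuthInfos[cfg.Contexts[cfg.CurrentContext].AuthInfo] != nil) // true
}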
"sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" appconfig "github.com/openyurtio/openyurt/cmd/yurt-manager/app/config" certfactory "github.com/openyurtio/openyurt/pkg/util/certmanager/factory" @@ -39,16 +44,16 @@ import ( ) func init() { - flag.IntVar(&concurrentReconciles, "poolcoordinatorcert-workers", concurrentReconciles, "Max concurrent workers for Poolcoordinatorcert controller.") + flag.IntVar(&concurrentReconciles, "poolcoordinatorcert-workers", concurrentReconciles, "Max concurrent workers for PoolCoordinatorCert controller.") } var ( - concurrentReconciles = 3 + concurrentReconciles = 1 PoolcoordinatorNS = "kube-system" ) const ( - controllerName = "Poolcoordinatorcert-controller" + controllerName = "PoolCoordinatorCert-controller" // tmp file directory for certmanager to write cert files certDir = "/tmp" @@ -68,22 +73,22 @@ const ( // - apiserver-kubelet-client.crt (not self signed) // - apiserver-kubelet-client.key (not self signed) // - admin.conf (kube-config) - PoolcoordinatorStaticSecertName = "pool-coordinator-static-certs" + PoolcoordinatorStaticSecretName = "pool-coordinator-static-certs" // Dynamic certs will not be shared among clients or servers, contains: // - apiserver.crt // - apiserver.key // - etcd-server.crt // - etcd-server.key - // todo: currently we only create one copy, this will be refined in the future to assign customized certs for differnet nodepools - PoolcoordinatorDynamicSecertName = "pool-coordinator-dynamic-certs" + // todo: currently we only create one copy, this will be refined in the future to assign customized certs for different nodepools + PoolcoordinatorDynamicSecretName = "pool-coordinator-dynamic-certs" // Yurthub certs shared by all yurthub, contains: // - ca.crt // - pool-coordinator-yurthub-client.crt // - pool-coordinator-yurthub-client.key - PoolcoordinatorYurthubClientSecertName = "pool-coordinator-yurthub-certs" + PoolcoordinatorYurthubClientSecretName = "pool-coordinator-yurthub-certs" // Monitoring kubeconfig contains: monitoring kubeconfig for poolcoordinator // - kubeconfig - PoolcoordinatorMonitoringKubeconfigSecertName = "pool-coordinator-monitoring-kubeconfig" + PoolcoordinatorMonitoringKubeconfigSecretName = "pool-coordinator-monitoring-kubeconfig" PoolcoordinatorOrg = "openyurt:pool-coordinator" PoolcoordinatorAdminOrg = "system:masters" @@ -91,7 +96,6 @@ const ( PoolcoordinatorAPIServerCN = "openyurt:pool-coordinator:apiserver" PoolcoordinatorNodeLeaseProxyClientCN = "openyurt:pool-coordinator:node-lease-proxy-client" PoolcoordinatorETCDCN = "openyurt:pool-coordinator:etcd" - PoolcoordinatorYurthubClientCN = "openyurt:pool-coordinator:yurthub" KubeConfigMonitoringClientCN = "openyurt:pool-coordinator:monitoring" KubeConfigAdminClientCN = "cluster-admin" ) @@ -112,78 +116,86 @@ type CertConfig struct { DNSNames []string IPs []net.IP - // certInit is used for initilize those attrs which has to be determined dynamically + // certInit is used for initialize those attrs which has to be determined dynamically // e.g. 
@@ -112,78 +116,86 @@ type CertConfig struct {
 	DNSNames []string
 	IPs      []net.IP
 
-	// certInit is used for initilize those attrs which has to be determined dynamically
+	// certInit is used to initialize those attrs that have to be determined dynamically
 	// e.g. TLS server cert's IP & DNSNames
 	certInit certInitFunc
 }
 
-var allSelfSignedCerts []CertConfig = []CertConfig{
-	{
-		CertName:     "apiserver-etcd-client",
-		SecretName:   PoolcoordinatorStaticSecertName,
-		IsKubeConfig: false,
-		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
-		CommonName:   PoolcoordinatorETCDCN,
-		Organization: []string{PoolcoordinatorOrg},
-	},
-	{
-		CertName:     "pool-coordinator-yurthub-client",
-		SecretName:   PoolcoordinatorYurthubClientSecertName,
-		IsKubeConfig: false,
-		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
-		CommonName:   KubeConfigAdminClientCN,
-		Organization: []string{PoolcoordinatorAdminOrg},
-	},
-	{
-		CertName:     "apiserver",
-		SecretName:   PoolcoordinatorDynamicSecertName,
-		IsKubeConfig: false,
-		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
-		CommonName:   PoolcoordinatorAPIServerCN,
-		Organization: []string{PoolcoordinatorOrg},
-		certInit: func(i client.Interface, c <-chan struct{}) ([]net.IP, []string, error) {
-			return waitUntilSVCReady(i, PoolcoordinatorAPIServerSVC, c)
+var (
+	allIndependentCerts = []CertConfig{
+		{
+			CertName:     "apiserver-etcd-client",
+			SecretName:   PoolcoordinatorStaticSecretName,
+			IsKubeConfig: false,
+			ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+			CommonName:   PoolcoordinatorETCDCN,
+			Organization: []string{PoolcoordinatorOrg},
 		},
-	},
-	{
-		CertName:     "etcd-server",
-		SecretName:   PoolcoordinatorDynamicSecertName,
-		IsKubeConfig: false,
-		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
-		IPs: []net.IP{
-			net.ParseIP("127.0.0.1"),
+		{
+			CertName:     "pool-coordinator-yurthub-client",
+			SecretName:   PoolcoordinatorYurthubClientSecretName,
+			IsKubeConfig: false,
+			ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+			CommonName:   KubeConfigAdminClientCN,
+			Organization: []string{PoolcoordinatorAdminOrg},
 		},
-		CommonName:   PoolcoordinatorETCDCN,
-		Organization: []string{PoolcoordinatorOrg},
-		certInit: func(i client.Interface, c <-chan struct{}) ([]net.IP, []string, error) {
-			return waitUntilSVCReady(i, PoolcoordinatorETCDSVC, c)
+	}
+
+	certsDependOnETCDSvc = []CertConfig{
+		{
+			CertName:     "etcd-server",
+			SecretName:   PoolcoordinatorDynamicSecretName,
+			IsKubeConfig: false,
+			ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+			IPs: []net.IP{
+				net.ParseIP("127.0.0.1"),
+			},
+			CommonName:   PoolcoordinatorETCDCN,
+			Organization: []string{PoolcoordinatorOrg},
+			certInit: func(i client.Interface, c <-chan struct{}) ([]net.IP, []string, error) {
+				return waitUntilSVCReady(i, PoolcoordinatorETCDSVC, c)
+			},
+		},
+	}
+
+	certsDependOnAPIServerSvc = []CertConfig{
+		{
+			CertName:     "apiserver",
+			SecretName:   PoolcoordinatorDynamicSecretName,
+			IsKubeConfig: false,
+			ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+			CommonName:   PoolcoordinatorAPIServerCN,
+			Organization: []string{PoolcoordinatorOrg},
+			certInit: func(i client.Interface, c <-chan struct{}) ([]net.IP, []string, error) {
+				return waitUntilSVCReady(i, PoolcoordinatorAPIServerSVC, c)
+			},
 		},
-	},
-	{
-		CertName:     "kubeconfig",
-		SecretName:   PoolcoordinatorMonitoringKubeconfigSecertName,
-		IsKubeConfig: true,
-		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
-		CommonName:   KubeConfigMonitoringClientCN,
-		Organization: []string{PoolcoordinatorOrg},
-		// As a clientAuth cert, kubeconfig cert don't need IP&DNS to work,
-		// but kubeconfig need this extra information to verify if it's out of date
-		certInit: func(i client.Interface, c <-chan struct{}) ([]net.IP, []string, error) {
-			return waitUntilSVCReady(i, PoolcoordinatorAPIServerSVC, c)
+		{
+			CertName:     "kubeconfig",
+			SecretName:   PoolcoordinatorMonitoringKubeconfigSecretName,
+			IsKubeConfig: true,
+			ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+			CommonName:   KubeConfigMonitoringClientCN,
+			Organization: []string{PoolcoordinatorOrg},
+			// As a clientAuth cert, the kubeconfig cert doesn't need IP&DNS to work,
+			// but the kubeconfig needs this extra information to verify whether it's out of date
+			certInit: func(i client.Interface, c <-chan struct{}) ([]net.IP, []string, error) {
+				return waitUntilSVCReady(i, PoolcoordinatorAPIServerSVC, c)
+			},
 		},
-	},
-	{
-		CertName:     "admin.conf",
-		SecretName:   PoolcoordinatorStaticSecertName,
-		IsKubeConfig: true,
-		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
-		CommonName:   KubeConfigAdminClientCN,
-		Organization: []string{PoolcoordinatorAdminOrg},
-		certInit: func(i client.Interface, c <-chan struct{}) ([]net.IP, []string, error) {
-			return waitUntilSVCReady(i, PoolcoordinatorAPIServerSVC, c)
+		{
+			CertName:     "admin.conf",
+			SecretName:   PoolcoordinatorStaticSecretName,
+			IsKubeConfig: true,
+			ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+			CommonName:   KubeConfigAdminClientCN,
+			Organization: []string{PoolcoordinatorAdminOrg},
+			certInit: func(i client.Interface, c <-chan struct{}) ([]net.IP, []string, error) {
+				return waitUntilSVCReady(i, PoolcoordinatorAPIServerSVC, c)
+			},
 		},
-	},
-}
+	}
+)
 
 func Format(format string, args ...interface{}) string {
 	s := fmt.Sprintf(format, args...)
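The regrouping above is what lets the controller regenerate only the certs affected by a given Service event. As an in-package sketch (hypothetical entry, not part of this patch) of how a further serving cert would be added — a new CertConfig simply joins the group for the service it depends on, and its certInit callback defers IP/DNS resolution until that Service is ready:

var exampleServerCert = CertConfig{
	CertName:     "example-server", // hypothetical name
	SecretName:   PoolcoordinatorDynamicSecretName,
	IsKubeConfig: false,
	ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	CommonName:   PoolcoordinatorAPIServerCN,
	Organization: []string{PoolcoordinatorOrg},
	certInit: func(i client.Interface, c <-chan struct{}) ([]net.IP, []string, error) {
		// blocks until the service has an address, then returns its IPs/DNS names
		return waitUntilSVCReady(i, PoolcoordinatorAPIServerSVC, c)
	},
}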
@@ -192,11 +204,11 @@ func Format(format string, args ...interface{}) string {
 // Add creates a new Poolcoordinatorcert Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
 // and Start it when the Manager is Started.
-func Add(c *appconfig.CompletedConfig, mgr manager.Manager) error {
-	r := &ReconcilePoolcoordinatorcert{}
+func Add(cfg *appconfig.CompletedConfig, mgr manager.Manager) error {
+	r := &ReconcilePoolCoordinatorCert{}
 
 	// Create a new controller
-	_, err := controller.New(controllerName, mgr, controller.Options{
+	c, err := controller.New(controllerName, mgr, controller.Options{
 		Reconciler: r, MaxConcurrentReconciles: concurrentReconciles,
 	})
 	if err != nil {
@@ -204,34 +216,88 @@ func Add(c *appconfig.CompletedConfig, mgr manager.Manager) error {
 	}
 
 	// init global variables
-	cfg := c.ComponentConfig.Generic
-	PoolcoordinatorNS = cfg.WorkingNamespace
+	PoolcoordinatorNS = cfg.ComponentConfig.Generic.WorkingNamespace
 
-	// init PoolCoordinator
-	// prepare some necessary assets (CA, certs, kubeconfigs) for pool-coordinator
-	err = initPoolCoordinator(r.Client, nil)
+	// prepare ca certs for pool coordinator
+	caCert, caKey, reuseCA, err := initCA(r.kubeClient)
 	if err != nil {
+		return errors.Wrap(err, "init poolcoordinator failed")
+	}
+	r.caCert = caCert
+	r.caKey = caKey
+	r.reuseCA = reuseCA
+
+	// prepare all independent certs
+	if err := r.initPoolCoordinator(allIndependentCerts, nil); err != nil {
 		return err
 	}
 
-	return nil
+	// prepare ca cert in static secret
+	if err := WriteCertAndKeyIntoSecret(r.kubeClient, "ca", PoolcoordinatorStaticSecretName, r.caCert, nil); err != nil {
+		return err
+	}
+
+	// prepare ca cert in yurthub secret
+	if err := WriteCertAndKeyIntoSecret(r.kubeClient, "ca", PoolcoordinatorYurthubClientSecretName, r.caCert, nil); err != nil {
+		return err
+	}
+
+	// prepare sa key pairs
+	if err := initSAKeyPair(r.kubeClient, "sa", PoolcoordinatorStaticSecretName); err != nil {
+		return err
+	}
+
+	// watch pool coordinator service
+	svcReadyPredicates := predicate.Funcs{
+		CreateFunc: func(evt event.CreateEvent) bool {
+			if svc, ok := evt.Object.(*corev1.Service); ok {
+				return isPoolCoordinatorSvc(svc)
+			}
+			return false
+		},
+		UpdateFunc: func(evt event.UpdateEvent) bool {
+			if svc, ok := evt.ObjectNew.(*corev1.Service); ok {
+				return isPoolCoordinatorSvc(svc)
+			}
+			return false
+		},
+		DeleteFunc: func(evt event.DeleteEvent) bool {
+			return false
+		},
+	}
+	return c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForObject{}, svcReadyPredicates)
 }
 
-var _ reconcile.Reconciler = &ReconcilePoolcoordinatorcert{}
+func isPoolCoordinatorSvc(svc *corev1.Service) bool {
+	if svc == nil {
+		return false
+	}
+
+	if svc.Namespace == PoolcoordinatorNS && (svc.Name == PoolcoordinatorAPIServerSVC || svc.Name == PoolcoordinatorETCDSVC) {
+		return true
+	}
 
-// ReconcilePoolcoordinatorcert reconciles a Poolcoordinatorcert object
-type ReconcilePoolcoordinatorcert struct {
-	Client client.Interface
+	return false
 }
 
-// InjectConfig
-func (r *ReconcilePoolcoordinatorcert) InjectConfig(cfg *rest.Config) error {
-	client, err := client.NewForConfig(cfg)
+var _ reconcile.Reconciler = &ReconcilePoolCoordinatorCert{}
+
+// ReconcilePoolCoordinatorCert reconciles a PoolCoordinatorCert object
+type ReconcilePoolCoordinatorCert struct {
+	kubeClient client.Interface
+	caCert     *x509.Certificate
+	caKey      crypto.Signer
+	reuseCA    bool
+}
+
+// InjectConfig will prepare kube client for PoolCoordinatorCert
+func (r *ReconcilePoolCoordinatorCert) InjectConfig(cfg *rest.Config) error {
+	kubeClient, err := client.NewForConfig(cfg)
 	if err != nil {
 		klog.Errorf("failed to create kube client, %v", err)
 		return err
 	}
-	r.Client = client
+	r.kubeClient = kubeClient
 	return nil
 }
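A quick in-package test sketch for the filter above (not included in this patch; it assumes the default "kube-system" working namespace):

func TestIsPoolCoordinatorSvc(t *testing.T) {
	cases := []struct {
		ns, name string
		want     bool
	}{
		{"kube-system", PoolcoordinatorAPIServerSVC, true},
		{"kube-system", PoolcoordinatorETCDSVC, true},
		{"kube-system", "kubernetes", false},
		{"default", PoolcoordinatorAPIServerSVC, false},
	}
	for _, c := range cases {
		svc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: c.ns, Name: c.name}}
		if got := isPoolCoordinatorSvc(svc); got != c.want {
			t.Errorf("isPoolCoordinatorSvc(%s/%s) = %v, want %v", c.ns, c.name, got, c.want)
		}
	}
}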
@@ -240,46 +306,56 @@ func (r *ReconcilePoolcoordinatorcert) InjectConfig(cfg *rest.Config) error {
 // +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;watch;list
 
 // todo: make customized certificate for each poolcoordinator pod
-func (r *ReconcilePoolcoordinatorcert) Reconcile(_ context.Context, request reconcile.Request) (reconcile.Result, error) {
+func (r *ReconcilePoolCoordinatorCert) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
 	// Note !!!!!!!!!!
 	// We strongly recommend use Format() to encapsulation because Format() can print logs by module
 	// @kadisi
-	klog.Infof(Format("Reconcile Poolcoordinatorcert %s/%s", request.Namespace, request.Name))
+	klog.Infof(Format("Reconcile PoolCoordinatorCert %s/%s", request.Namespace, request.Name))
+
+	// 1. prepare apiserver-kubelet-client cert
+	if err := initAPIServerClientCert(r.kubeClient, ctx.Done()); err != nil {
+		return reconcile.Result{}, err
+	}
+
+	// 2. prepare node-lease-proxy-client cert
+	if err := initNodeLeaseProxyClient(r.kubeClient, ctx.Done()); err != nil {
+		return reconcile.Result{}, err
+	}
+
+	// 3. prepare certs based on service
+	if request.NamespacedName.Namespace == PoolcoordinatorNS {
+		if request.NamespacedName.Name == PoolcoordinatorAPIServerSVC {
+			return reconcile.Result{}, r.initPoolCoordinator(certsDependOnAPIServerSvc, ctx.Done())
+		} else if request.NamespacedName.Name == PoolcoordinatorETCDSVC {
+			return reconcile.Result{}, r.initPoolCoordinator(certsDependOnETCDSvc, ctx.Done())
+		}
+	}
 	return reconcile.Result{}, nil
 }
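To make the dispatch above concrete (in-package sketch, not part of this patch; types is k8s.io/apimachinery/pkg/types): the Service watch registered in Add() enqueues a request like the one below for the pool-coordinator apiserver Service, and Reconcile then regenerates only certsDependOnAPIServerSvc rather than every managed cert.

req := reconcile.Request{
	NamespacedName: types.NamespacedName{
		Namespace: PoolcoordinatorNS, // "kube-system" by default
		Name:      PoolcoordinatorAPIServerSVC,
	},
}
// routed to r.initPoolCoordinator(certsDependOnAPIServerSvc, ctx.Done())
_, err := r.Reconcile(context.TODO(), req)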
 
-func initPoolCoordinator(clientSet client.Interface, stopCh <-chan struct{}) error {
+func (r *ReconcilePoolCoordinatorCert) initPoolCoordinator(allSelfSignedCerts []CertConfig, stopCh <-chan struct{}) error {
 	klog.Infof(Format("init poolcoordinator started"))
-
-	// Prepare CA certs
-	caCert, caKey, reuseCA, err := initCA(clientSet)
-	if err != nil {
-		return errors.Wrap(err, "init poolcoordinator failed")
-	}
-	// Prepare certs used by poolcoordinators
-	// 1. prepare selfsigned certs
+	// prepare selfsigned certs
 	var selfSignedCerts []CertConfig
 
-	if reuseCA {
+	if r.reuseCA {
 		// if CA is reused
 		// then we can check if there are selfsigned certs can be reused too
 		for _, certConf := range allSelfSignedCerts {
 			// 1.1 check if cert exist
-			cert, _, err := loadCertAndKeyFromSecret(clientSet, certConf)
+			cert, _, err := loadCertAndKeyFromSecret(r.kubeClient, certConf)
 			if err != nil {
 				klog.Infof(Format("can not load cert %s from %s secret", certConf.CertName, certConf.SecretName))
 				selfSignedCerts = append(selfSignedCerts, certConf)
 				continue
 			}
 
-			// 1.2 check if cert is autorized by current CA
-			if !IsCertFromCA(cert, caCert) {
+			// 1.2 check if cert is authorized by current CA
+			if !IsCertFromCA(cert, r.caCert) {
 				klog.Infof(Format("existing cert %s is not authorized by current CA", certConf.CertName))
 				selfSignedCerts = append(selfSignedCerts, certConf)
 				continue
@@ -288,7 +364,7 @@ func initPoolCoordinator(clientSet client.Interface, stopCh <-chan struct{}) err
 			// 1.3 check has dynamic attrs changed
 			if certConf.certInit != nil {
 				// receive dynamic IP addresses
-				ips, _, err := certConf.certInit(clientSet, stopCh)
+				ips, _, err := certConf.certInit(r.kubeClient, stopCh)
 				if err != nil {
 					// if cert init failed, skip this cert
 					klog.Errorf(Format("fail to init cert %s when checking dynamic attrs: %v", certConf.CertName, err))
@@ -311,44 +387,19 @@ func initPoolCoordinator(clientSet client.Interface, stopCh <-chan struct{}) err
 		selfSignedCerts = allSelfSignedCerts
 	}
 
-	// create self signed certs
+	// create selfsigned certs
 	for _, certConf := range selfSignedCerts {
-		if err := initPoolCoordinatorCert(clientSet, certConf, caCert, caKey, stopCh); err != nil {
+		if err := initPoolCoordinatorCert(r.kubeClient, certConf, r.caCert, r.caKey, stopCh); err != nil {
 			klog.Errorf(Format("create cert %s fail: %v", certConf.CertName, err))
 			return err
 		}
 	}
 
-	// 2. prepare apiserver-kubelet-client cert
-	if err := initAPIServerClientCert(clientSet, stopCh); err != nil {
-		return err
-	}
-
-	// 3. prepare node-lease-proxy-client cert
-	if err := initNodeLeaseProxyClient(clientSet, stopCh); err != nil {
-		return err
-	}
-
-	// 4. prepare ca cert in static secret
-	if err := WriteCertAndKeyIntoSecret(clientSet, "ca", PoolcoordinatorStaticSecertName, caCert, nil); err != nil {
-		return err
-	}
-
-	// 5. prepare ca cert in yurthub secret
-	if err := WriteCertAndKeyIntoSecret(clientSet, "ca", PoolcoordinatorYurthubClientSecertName, caCert, nil); err != nil {
-		return err
-	}
-
-	// 6. prepare sa key pairs
-	if err := initSAKeyPair(clientSet, "sa", PoolcoordinatorStaticSecertName); err != nil {
-		return err
-	}
-
 	return nil
 }
 
-// Prepare CA certs,
-// check if pool-coordinator CA already exist, if not creat one
+// initCA is used for preparing CA certs,
+// check if pool-coordinator CA already exists, if not create one
 func initCA(clientSet client.Interface) (caCert *x509.Certificate, caKey crypto.Signer, reuse bool, err error) {
 	// try load CA cert&key from secret
 	caCert, caKey, err = loadCertAndKeyFromSecret(clientSet, CertConfig{
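Note that IsCertFromCA itself is unchanged by this diff; for reference, such a reuse check typically verifies the certificate's signature against the CA with crypto/x509 (sketch, not the project's actual implementation):

// isSignedBy reports whether cert was issued by caCert
func isSignedBy(cert, caCert *x509.Certificate) bool {
	return cert.CheckSignatureFrom(caCert) == nil
}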
@@ -362,13 +413,12 @@
 		klog.Info(Format("CA already exist in secret, reuse it"))
 		return caCert, caKey, true, nil
 	} else {
-		// if not exist
-		// create new CA certs
-		klog.Infof(Format("fail to get CA from secret: %v, create new CA", err))
+		// if ca secret does not exist, create new CA certs
+		klog.Infof(Format("secret(%s/%s) is not found, create new CA", PoolcoordinatorNS, PoolCoordinatorCASecretName))
 		// write it into the secret
 		caCert, caKey, err = NewSelfSignedCA()
 		if err != nil {
-			return nil, nil, false, errors.Wrap(err, "fail to write CA assets into secret when initializing poolcoordinator")
+			return nil, nil, false, errors.Wrap(err, "fail to create new self-signed CA assets when initializing poolcoordinator")
 		}
 
 		err = WriteCertAndKeyIntoSecret(clientSet, "ca", PoolCoordinatorCASecretName, caCert, caKey)
@@ -376,10 +426,22 @@
 		if err != nil {
 			return nil, nil, false, errors.Wrap(err, "fail to write CA assets into secret when initializing poolcoordinator")
 		}
 	}
+
+	return caCert, caKey, false, nil
 }
 
-func initAPIServerClientCert(clientSet client.Interface, stopCh <-chan struct{}) (err error) {
+func initAPIServerClientCert(clientSet client.Interface, stopCh <-chan struct{}) error {
+	if cert, _, err := loadCertAndKeyFromSecret(clientSet, CertConfig{
+		SecretName:   PoolcoordinatorStaticSecretName,
+		CertName:     "apiserver-kubelet-client",
+		IsKubeConfig: false,
+	}); cert != nil {
+		klog.Infof("apiserver-kubelet-client cert already exists in secret %s", PoolcoordinatorStaticSecretName)
+		return nil
+	} else if err != nil {
+		klog.Errorf("fail to get apiserver-kubelet-client cert in secret(%s), %v, and new cert will be created", PoolcoordinatorStaticSecretName, err)
+	}
+
 	certMgr, err := certfactory.NewCertManagerFactory(clientSet).New(&certfactory.CertManagerConfig{
 		CertDir:       certDir,
 		ComponentName: fmt.Sprintf("%s-%s", ComponentName, "apiserver-client"),
@@ -392,10 +454,21 @@
 		return err
 	}
 
-	return WriteCertIntoSecret(clientSet, "apiserver-kubelet-client", PoolcoordinatorStaticSecertName, certMgr, stopCh)
+	return WriteCertIntoSecret(clientSet, "apiserver-kubelet-client", PoolcoordinatorStaticSecretName, certMgr, stopCh)
 }
 
 func initNodeLeaseProxyClient(clientSet client.Interface, stopCh <-chan struct{}) error {
+	if cert, _, err := loadCertAndKeyFromSecret(clientSet, CertConfig{
+		SecretName:   PoolcoordinatorYurthubClientSecretName,
+		CertName:     "node-lease-proxy-client",
+		IsKubeConfig: false,
+	}); cert != nil {
+		klog.Infof("node-lease-proxy-client cert already exists in secret %s", PoolcoordinatorYurthubClientSecretName)
+		return nil
+	} else if err != nil {
+		klog.Errorf("fail to get node-lease-proxy-client cert in secret(%s), %v, and new cert will be created", PoolcoordinatorYurthubClientSecretName, err)
+	}
+
 	certMgr, err := certfactory.NewCertManagerFactory(clientSet).New(&certfactory.CertManagerConfig{
 		CertDir:       certDir,
 		ComponentName: "yurthub",
@@ -407,7 +480,7 @@
 		return err
 	}
 
-	return WriteCertIntoSecret(clientSet, "node-lease-proxy-client", PoolcoordinatorYurthubClientSecertName, certMgr, stopCh)
+	return WriteCertIntoSecret(clientSet, "node-lease-proxy-client", PoolcoordinatorYurthubClientSecretName, certMgr, stopCh)
 }
 
 // create new public/private key pair for signing service account users
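initAPIServerClientCert and initNodeLeaseProxyClient now share an identical "skip if the cert already exists" preamble; a possible follow-up consolidation (hypothetical in-package helper, not part of this patch):

// certExistsInSecret reports whether a usable cert is already stored,
// so callers can skip the CSR round-trip entirely
func certExistsInSecret(clientSet client.Interface, secretName, certName string) bool {
	cert, _, err := loadCertAndKeyFromSecret(clientSet, CertConfig{
		SecretName:   secretName,
		CertName:     certName,
		IsKubeConfig: false,
	})
	if err != nil {
		klog.V(4).Infof("cert %s not loadable from secret %s: %v", certName, secretName, err)
	}
	return cert != nil
}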
diff --git a/pkg/controller/poolcoordinator/cert/poolcoordinatorcert_controller_test.go b/pkg/controller/poolcoordinator/cert/poolcoordinatorcert_controller_test.go
index 7bbcb9a0c9d..5f995fe2b57 100644
--- a/pkg/controller/poolcoordinator/cert/poolcoordinatorcert_controller_test.go
+++ b/pkg/controller/poolcoordinator/cert/poolcoordinatorcert_controller_test.go
@@ -62,6 +62,11 @@ func TestInitCA(t *testing.T) {
 			}),
 			false,
 		},
+		{
+			"secret does not exist",
+			fake.NewSimpleClientset(),
+			false,
+		},
 	}
 
 	for _, tt := range tests {
diff --git a/pkg/controller/poolcoordinator/cert/secret.go b/pkg/controller/poolcoordinator/cert/secret.go
index 2bb49e66542..025e04bfbf9 100644
--- a/pkg/controller/poolcoordinator/cert/secret.go
+++ b/pkg/controller/poolcoordinator/cert/secret.go
@@ -54,7 +54,7 @@ func NewSecretClient(clientSet client.Interface, ns, name string) (*SecretClient
 	// if this secret already exist, reuse it
 	if kerrors.IsAlreadyExists(err) {
 		secret, _ = clientSet.CoreV1().Secrets(ns).Get(context.TODO(), name, metav1.GetOptions{})
-		klog.V(4).Infof(Format("secret %s already exisit", secret.Name))
+		klog.V(4).Infof(Format("secret %s already exists", secret.Name))
 	} else {
 		return nil, fmt.Errorf("create secret client %s fail: %v", name, err)
 	}
diff --git a/pkg/util/kubeconfig/kubeconfig.go b/pkg/util/kubeconfig/kubeconfig.go
index 221ded8d3ba..9625d2d3351 100644
--- a/pkg/util/kubeconfig/kubeconfig.go
+++ b/pkg/util/kubeconfig/kubeconfig.go
@@ -113,3 +113,15 @@ func GetClusterFromKubeConfig(config *clientcmdapi.Config) *clientcmdapi.Cluster
 	}
 	return nil
 }
+
+// GetAuthInfoFromKubeConfig returns the default AuthInfo of the specified KubeConfig
+func GetAuthInfoFromKubeConfig(config *clientcmdapi.Config) *clientcmdapi.AuthInfo {
+	// If there is an unnamed AuthInfo object, use it
+	if config.AuthInfos[""] != nil {
+		return config.AuthInfos[""]
+	}
+	if config.Contexts[config.CurrentContext] != nil {
+		return config.AuthInfos[config.Contexts[config.CurrentContext].AuthInfo]
+	}
+	return nil
+}
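A minimal test sketch (not included in this patch) pinning down the new helper's precedence — an unnamed AuthInfo wins, otherwise the current context's user entry is resolved:

func TestGetAuthInfoFromKubeConfig(t *testing.T) {
	cfg := clientcmdapi.NewConfig()
	cfg.AuthInfos["monitoring"] = &clientcmdapi.AuthInfo{ClientKeyData: []byte("key")}
	cfg.Contexts["ctx"] = &clientcmdapi.Context{AuthInfo: "monitoring"}
	cfg.CurrentContext = "ctx"

	if ai := GetAuthInfoFromKubeConfig(cfg); ai == nil || string(ai.ClientKeyData) != "key" {
		t.Errorf("expected the current context's auth info, got %v", ai)
	}

	// an unnamed entry takes precedence, mirroring GetClusterFromKubeConfig
	cfg.AuthInfos[""] = &clientcmdapi.AuthInfo{ClientKeyData: []byte("unnamed")}
	if ai := GetAuthInfoFromKubeConfig(cfg); ai == nil || string(ai.ClientKeyData) != "unnamed" {
		t.Errorf("expected the unnamed auth info, got %v", ai)
	}
}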