diff --git a/cmd/yurt-iot-dock/app/core.go b/cmd/yurt-iot-dock/app/core.go index 061dedbfe9e..ce4056db75a 100644 --- a/cmd/yurt-iot-dock/app/core.go +++ b/cmd/yurt-iot-dock/app/core.go @@ -97,22 +97,21 @@ func Run(opts *options.YurtIoTDockOptions, stopCh <-chan struct{}) { // perform preflight check setupLog.Info("[preflight] Running pre-flight checks") if err := preflightCheck(mgr, opts); err != nil { - setupLog.Error(err, "failed to run pre-flight checks") + setupLog.Error(err, "could not run pre-flight checks") os.Exit(1) } // register the field indexers setupLog.Info("[preflight] Registering the field indexers") if err := util.RegisterFieldIndexers(mgr.GetFieldIndexer()); err != nil { - setupLog.Error(err, "failed to register field indexers") + setupLog.Error(err, "could not register field indexers") os.Exit(1) } - // get nodepool where yurt-iot-dock run if opts.Nodepool == "" { opts.Nodepool, err = util.GetNodePool(mgr.GetConfig()) if err != nil { - setupLog.Error(err, "failed to get the nodepool where yurt-iot-dock run") + setupLog.Error(err, "could not get the nodepool where yurt-iot-dock runs") os.Exit(1) } } @@ -188,7 +187,7 @@ func Run(opts *options.YurtIoTDockOptions, stopCh <-chan struct{}) { setupLog.Info("[run controllers] Starting manager, acting on " + fmt.Sprintf("[NodePool: %s, Namespace: %s]", opts.Nodepool, opts.Namespace)) if err := mgr.Start(SetupSignalHandler(mgr.GetClient(), opts)); err != nil { - setupLog.Error(err, "failed to running manager") + setupLog.Error(err, "could not run manager") os.Exit(1) } } @@ -196,19 +195,19 @@ func Run(opts *options.YurtIoTDockOptions, stopCh <-chan struct{}) { func deleteCRsOnControllerShutdown(ctx context.Context, cli client.Client, opts *options.YurtIoTDockOptions) error { setupLog.Info("[deleteCRsOnControllerShutdown] start delete device crd") if err := controllers.DeleteDevicesOnControllerShutdown(ctx, cli, opts); err != nil { - setupLog.Error(err, "failed to shutdown device cr") + setupLog.Error(err, "could not shutdown device cr") return err } setupLog.Info("[deleteCRsOnControllerShutdown] start delete deviceprofile crd") if err := controllers.DeleteDeviceProfilesOnControllerShutdown(ctx, cli, opts); err != nil { - setupLog.Error(err, "failed to shutdown deviceprofile cr") + setupLog.Error(err, "could not shutdown deviceprofile cr") return err } setupLog.Info("[deleteCRsOnControllerShutdown] start delete deviceservice crd") if err := controllers.DeleteDeviceServicesOnControllerShutdown(ctx, cli, opts); err != nil { - setupLog.Error(err, "failed to shutdown deviceservice cr") + setupLog.Error(err, "could not shutdown deviceservice cr") return err } diff --git a/cmd/yurt-node-servant/config/config.go b/cmd/yurt-node-servant/config/config.go index dd87927c2a9..702cd7c511d 100644 --- a/cmd/yurt-node-servant/config/config.go +++ b/cmd/yurt-node-servant/config/config.go @@ -65,7 +65,7 @@ func newCmdConfigControlPlane() *cobra.Command { return err } if err := runner.Do(); err != nil { - return fmt.Errorf("failed to config control-plane, %v", err) + return fmt.Errorf("could not config control-plane, %v", err) } klog.Info("node-servant config control-plane success") diff --git a/cmd/yurt-node-servant/convert/convert.go b/cmd/yurt-node-servant/convert/convert.go index e732f3267f1..948eeba81c1 100644 --- a/cmd/yurt-node-servant/convert/convert.go +++ b/cmd/yurt-node-servant/convert/convert.go @@ -49,7 +49,7 @@ func NewConvertCmd() *cobra.Command { converter := nodeconverter.NewConverterWithOptions(o) if err := converter.Do(); err != 
nil { - klog.Fatalf("fail to convert the kubernetes node to a yurt node: %s", err) + klog.Fatalf("could not convert the kubernetes node to a yurt node: %s", err) } klog.Info("convert success") }, diff --git a/cmd/yurt-node-servant/revert/revert.go b/cmd/yurt-node-servant/revert/revert.go index ff8e02d597c..c85578c0e10 100644 --- a/cmd/yurt-node-servant/revert/revert.go +++ b/cmd/yurt-node-servant/revert/revert.go @@ -31,12 +31,12 @@ func NewRevertCmd() *cobra.Command { Short: "", Run: func(cmd *cobra.Command, args []string) { if err := o.Complete(cmd.Flags()); err != nil { - klog.Fatalf("fail to complete the revert option: %s", err) + klog.Fatalf("could not complete the revert option: %s", err) } r := revert.NewReverterWithOptions(o) if err := r.Do(); err != nil { - klog.Fatalf("fail to revert the yurt node to a kubernetes node: %s", err) + klog.Fatalf("could not revert the yurt node to a kubernetes node: %s", err) } klog.Info("revert success") }, diff --git a/cmd/yurt-node-servant/static-pod-upgrade/upgrade.go b/cmd/yurt-node-servant/static-pod-upgrade/upgrade.go index dd2c8177e93..4c3721160e3 100644 --- a/cmd/yurt-node-servant/static-pod-upgrade/upgrade.go +++ b/cmd/yurt-node-servant/static-pod-upgrade/upgrade.go @@ -36,16 +36,16 @@ func NewUpgradeCmd() *cobra.Command { }) if err := o.Validate(); err != nil { - klog.Fatalf("Fail to validate static pod upgrade args, %v", err) + klog.Fatalf("could not validate static pod upgrade args, %v", err) } ctrl, err := upgrade.NewWithOptions(o) if err != nil { - klog.Fatalf("Fail to create static-pod-upgrade controller, %v", err) + klog.Fatalf("could not create static-pod-upgrade controller, %v", err) } if err = ctrl.Upgrade(); err != nil { - klog.Fatalf("Fail to upgrade static pod, %v", err) + klog.Fatalf("could not upgrade static pod, %v", err) } klog.Info("Static pod upgrade Success") diff --git a/cmd/yurt-tunnel-server/app/options/options.go b/cmd/yurt-tunnel-server/app/options/options.go index 32b045a7471..1804396d596 100644 --- a/cmd/yurt-tunnel-server/app/options/options.go +++ b/cmd/yurt-tunnel-server/app/options/options.go @@ -150,7 +150,7 @@ func (o *ServerOptions) Config() (*config.Config, error) { cfg.ListenMetaAddr = net.JoinHostPort(o.InsecureBindAddr, o.MetaPort) cfg.RootCert, err = certmanager.GenRootCertPool(o.KubeConfig, constants.YurttunnelCAFile) if err != nil { - return nil, fmt.Errorf("fail to generate the rootCertPool: %w", err) + return nil, fmt.Errorf("could not generate the rootCertPool: %w", err) } // function 'kubeutil.CreateClientSet' will try to create the clientset diff --git a/cmd/yurthub/app/config/config.go b/cmd/yurthub/app/config/config.go index 22255987b70..dcd5baaff88 100644 --- a/cmd/yurthub/app/config/config.go +++ b/cmd/yurthub/app/config/config.go @@ -121,7 +121,7 @@ func Complete(options *options.YurtHubOptions) (*YurtHubConfiguration, error) { serializerManager := serializer.NewSerializerManager() restMapperManager, err := meta.NewRESTMapperManager(options.DiskCachePath) if err != nil { - klog.Errorf("failed to create restMapperManager at path %s, %v", options.DiskCachePath, err) + klog.Errorf("could not create restMapperManager at path %s, %v", options.DiskCachePath, err) return nil, err } @@ -214,7 +214,7 @@ func parseRemoteServers(serverAddr string) ([]*url.URL, error) { for _, server := range servers { u, err := url.Parse(server) if err != nil { - klog.Errorf("failed to parse server address %q, %v", server, err) + klog.Errorf("could not parse server address %q, %v", server, err) return us, err } if 
u.Scheme == "" { diff --git a/cmd/yurthub/app/start.go b/cmd/yurthub/app/start.go index 14e101a2245..7738fdd2268 100644 --- a/cmd/yurthub/app/start.go +++ b/cmd/yurthub/app/start.go @@ -102,7 +102,7 @@ func Run(ctx context.Context, cfg *config.YurtHubConfiguration) error { klog.Infof("%d. prepare cloud kube clients", trace) cloudClients, err := createClients(cfg.HeartbeatTimeoutSeconds, cfg.RemoteServers, transportManager) if err != nil { - return fmt.Errorf("failed to create cloud clients, %w", err) + return fmt.Errorf("could not create cloud clients, %w", err) } trace++ @@ -249,7 +249,7 @@ func coordinatorRun(ctx context.Context, coorCertManager, err := coordinatorcertmgr.NewCertManager(cfg.CoordinatorPKIDir, cfg.YurtHubNamespace, cfg.ProxiedClient, cfg.SharedFactory) close(coordinatorInformerRegistryChan) // notify the coordinator secret informer registry event if err != nil { - klog.Errorf("coordinator failed to create coordinator cert manager, %v", err) + klog.Errorf("coordinator could not create coordinator cert manager, %v", err) return } klog.Info("coordinator new certManager success") @@ -274,14 +274,14 @@ func coordinatorRun(ctx context.Context, if apiServerIP == nil { apiServerService, err := serviceList.Services(util.YurtHubNamespace).Get(cfg.CoordinatorServerURL.Hostname()) if err != nil { - klog.Errorf("coordinator failed to get apiServer service, %v", err) + klog.Errorf("coordinator could not get apiServer service, %v", err) return } // rewrite coordinator service info for cfg coordinatorServerURL, err := url.Parse(fmt.Sprintf("https://%s:%s", apiServerService.Spec.ClusterIP, cfg.CoordinatorServerURL.Port())) if err != nil { - klog.Errorf("coordinator failed to parse apiServer service, %v", err) + klog.Errorf("coordinator could not parse apiServer service, %v", err) return } cfg.CoordinatorServerURL = coordinatorServerURL @@ -289,7 +289,7 @@ func coordinatorRun(ctx context.Context, if etcdIP == nil { etcdService, err := serviceList.Services(util.YurtHubNamespace).Get(etcdUrl.Hostname()) if err != nil { - klog.Errorf("coordinator failed to get etcd service, %v", err) + klog.Errorf("coordinator could not get etcd service, %v", err) return } cfg.CoordinatorStorageAddr = fmt.Sprintf("https://%s:%s", etcdService.Spec.ClusterIP, etcdUrl.Port()) @@ -297,7 +297,7 @@ func coordinatorRun(ctx context.Context, coorTransportMgr, err := yurtCoordinatorTransportMgrGetter(coorCertManager, ctx.Done()) if err != nil { - klog.Errorf("coordinator failed to create coordinator transport manager, %v", err) + klog.Errorf("coordinator could not create coordinator transport manager, %v", err) return } @@ -307,27 +307,27 @@ func coordinatorRun(ctx context.Context, Timeout: time.Duration(cfg.HeartbeatTimeoutSeconds) * time.Second, }) if err != nil { - klog.Errorf("coordinator failed to get coordinator client for yurt coordinator, %v", err) + klog.Errorf("coordinator could not get coordinator client for yurt coordinator, %v", err) return } coorHealthChecker, err := healthchecker.NewCoordinatorHealthChecker(cfg, coordinatorClient, cloudHealthChecker, ctx.Done()) if err != nil { - klog.Errorf("coordinator failed to create coordinator health checker, %v", err) + klog.Errorf("coordinator could not create coordinator health checker, %v", err) return } var elector *yurtcoordinator.HubElector elector, err = yurtcoordinator.NewHubElector(cfg, coordinatorClient, coorHealthChecker, cloudHealthChecker, ctx.Done()) if err != nil { - klog.Errorf("coordinator failed to create hub elector, %v", err) + 
klog.Errorf("coordinator could not create hub elector, %v", err) return } go elector.Run(ctx.Done()) coor, err := yurtcoordinator.NewCoordinator(ctx, cfg, cloudHealthChecker, restConfigMgr, coorCertManager, coorTransportMgr, elector) if err != nil { - klog.Errorf("coordinator failed to create coordinator, %v", err) + klog.Errorf("coordinator could not create coordinator, %v", err) return } go coor.Run() @@ -366,7 +366,7 @@ func yurtCoordinatorTransportMgrGetter(coordinatorCertMgr *coordinatorcertmgr.Ce coordinatorTransportMgr, err := transport.NewTransportManager(coordinatorCertMgr, stopCh) if err != nil { - return nil, fmt.Errorf("failed to create transport manager for yurt coordinator, %v", err) + return nil, fmt.Errorf("could not create transport manager for yurt coordinator, %v", err) } return coordinatorTransportMgr, nil } diff --git a/pkg/node-servant/components/yurthub.go b/pkg/node-servant/components/yurthub.go index ee20b5ed12e..75cfa04e48c 100644 --- a/pkg/node-servant/components/yurthub.go +++ b/pkg/node-servant/components/yurthub.go @@ -97,7 +97,7 @@ func (op *yurtHubOperator) Install() error { } content, err := os.ReadFile(configMapDataPath) if err != nil { - return fmt.Errorf("failed to read source file %s: %w", configMapDataPath, err) + return fmt.Errorf("could not read source file %s: %w", configMapDataPath, err) } klog.Infof("yurt-hub.yaml apiServerAddr: %+v", op.apiServerAddr) yssYurtHub, err := tmplutil.SubsituteTemplate(string(content), map[string]string{ @@ -224,7 +224,7 @@ func pingClusterHealthz(client *http.Client, addr string) (bool, error) { b, err := io.ReadAll(resp.Body) defer resp.Body.Close() if err != nil { - return false, fmt.Errorf("failed to read response of cluster healthz, %w", err) + return false, fmt.Errorf("could not read response of cluster healthz, %w", err) } if resp.StatusCode != http.StatusOK { diff --git a/pkg/node-servant/job.go b/pkg/node-servant/job.go index b14d86fac80..b7dee4911e1 100644 --- a/pkg/node-servant/job.go +++ b/pkg/node-servant/job.go @@ -61,7 +61,7 @@ func RenderNodeServantJob(action string, renderCtx map[string]string, nodeName s } srvJob, ok := srvJobObj.(*batchv1.Job) if !ok { - return nil, fmt.Errorf("fail to assert node-servant job") + return nil, fmt.Errorf("could not assert node-servant job") } return srvJob, nil diff --git a/pkg/node-servant/static-pod-upgrade/upgrade.go b/pkg/node-servant/static-pod-upgrade/upgrade.go index dc1c0150fe5..a54518b9e97 100644 --- a/pkg/node-servant/static-pod-upgrade/upgrade.go +++ b/pkg/node-servant/static-pod-upgrade/upgrade.go @@ -120,13 +120,13 @@ func (ctrl *Controller) AutoUpgrade() error { ok, err := ctrl.verify() if err != nil { if err := ctrl.rollbackManifest(); err != nil { - klog.Errorf("Fail to rollback manifest when upgrade failed, %v", err) + klog.Errorf("could not rollback manifest when upgrade failed, %v", err) } return err } if !ok { if err := ctrl.rollbackManifest(); err != nil { - klog.Errorf("Fail to rollback manifest when upgrade failed, %v", err) + klog.Errorf("could not rollback manifest when upgrade failed, %v", err) } return fmt.Errorf("the latest static pod is not running") } diff --git a/pkg/node-servant/static-pod-upgrade/util/pods.go b/pkg/node-servant/static-pod-upgrade/util/pods.go index b19c1a59a54..a13af366232 100644 --- a/pkg/node-servant/static-pod-upgrade/util/pods.go +++ b/pkg/node-servant/static-pod-upgrade/util/pods.go @@ -46,7 +46,7 @@ func GetPodFromYurtHub(namespace, name string) (*v1.Pod, error) { } } - return nil, fmt.Errorf("fail to find pod 
%s/%s", namespace, name) + return nil, fmt.Errorf("could not find pod %s/%s", namespace, name) } func GetPodsFromYurtHub(url string) (*v1.PodList, error) { @@ -75,7 +75,7 @@ func getPodsDataFromYurtHub(url string) ([]byte, error) { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("fail to access yurthub pods API, returned status: %v", resp.Status) + return nil, fmt.Errorf("could not access yurthub pods API, returned status: %v", resp.Status) } data, err := ioutil.ReadAll(resp.Body) @@ -92,7 +92,7 @@ func decodePods(data []byte) (*v1.PodList, error) { podList := new(v1.PodList) if _, _, err := codec.Decode(data, nil, podList); err != nil { - return nil, fmt.Errorf("failed to decode pod list: %s", err) + return nil, fmt.Errorf("could not decode pod list: %s", err) } return podList, nil } diff --git a/pkg/util/certmanager/factory/factory.go b/pkg/util/certmanager/factory/factory.go index c747b569092..6edb523c974 100644 --- a/pkg/util/certmanager/factory/factory.go +++ b/pkg/util/certmanager/factory/factory.go @@ -104,7 +104,7 @@ func (f *factory) New(cfg *CertManagerConfig) (certificate.Manager, error) { if util.IsNil(f.fileStore) { f.fileStore, err = store.NewFileStoreWrapper(cfg.ComponentName, cfg.CertDir, cfg.CertDir, "", "") if err != nil { - return nil, fmt.Errorf("failed to initialize the server certificate store: %w", err) + return nil, fmt.Errorf("could not initialize the server certificate store: %w", err) } } @@ -117,7 +117,7 @@ func (f *factory) New(cfg *CertManagerConfig) (certificate.Manager, error) { ips = newIPs } if err != nil { - klog.Errorf("failed to get ips for %s when preparing cr template, %v", cfg.ComponentName, err) + klog.Errorf("could not get ips for %s when preparing cr template, %v", cfg.ComponentName, err) return nil } } @@ -128,7 +128,7 @@ func (f *factory) New(cfg *CertManagerConfig) (certificate.Manager, error) { dnsNames = newDNSNames } if err != nil { - klog.Errorf("failed to get dns names for %s when preparing cr template, %v", cfg.ComponentName, err) + klog.Errorf("could not get dns names for %s when preparing cr template, %v", cfg.ComponentName, err) return nil } } diff --git a/pkg/util/certmanager/pki.go b/pkg/util/certmanager/pki.go index d114c8e716f..087e4d4adc4 100644 --- a/pkg/util/certmanager/pki.go +++ b/pkg/util/certmanager/pki.go @@ -84,7 +84,7 @@ func GenRootCertPool(kubeConfig, caFile string) (*x509.CertPool, error) { // load the root ca from the given kubeconfig file config, err := clientcmd.LoadFromFile(kubeConfig) if err != nil || config == nil { - return nil, fmt.Errorf("failed to load the kubeconfig file(%s), %w", + return nil, fmt.Errorf("could not load the kubeconfig file(%s), %w", kubeConfig, err) } @@ -173,7 +173,7 @@ func GenCertPoolUseCA(caFile string) (*x509.CertPool, error) { if os.IsNotExist(err) { return nil, fmt.Errorf("CA file(%s) doesn't exist", caFile) } - return nil, fmt.Errorf("fail to stat the CA file(%s): %w", caFile, err) + return nil, fmt.Errorf("could not stat the CA file(%s): %w", caFile, err) } caData, err := os.ReadFile(caFile) diff --git a/pkg/yurtadm/cmd/join/join.go b/pkg/yurtadm/cmd/join/join.go index 89a855e8500..45bfaf61660 100644 --- a/pkg/yurtadm/cmd/join/join.go +++ b/pkg/yurtadm/cmd/join/join.go @@ -283,7 +283,7 @@ func newJoinData(args []string, opt *joinOptions) (*joinData, error) { // Either use specified nodename or get hostname from OS envs name, err := edgenode.GetHostname(opt.nodeName) if err != nil { - klog.Errorf("failed to get node name, %v", err) + 
klog.Errorf("could not get node name, %v", err) return nil, err } @@ -327,7 +327,7 @@ func newJoinData(args []string, opt *joinOptions) (*joinData, error) { // get tls bootstrap config cfg, err := yurtadmutil.RetrieveBootstrapConfig(data) if err != nil { - klog.Errorf("failed to retrieve bootstrap config, %v", err) + klog.Errorf("could not retrieve bootstrap config, %v", err) return nil, err } data.tlsBootstrapCfg = cfg @@ -335,14 +335,14 @@ func newJoinData(args []string, opt *joinOptions) (*joinData, error) { // get kubernetes version client, err := kubeconfigutil.ToClientSet(cfg) if err != nil { - klog.Errorf("failed to create bootstrap client, %v", err) + klog.Errorf("could not create bootstrap client, %v", err) return nil, err } data.clientSet = client k8sVersion, err := yurtadmutil.GetKubernetesVersionFromCluster(client) if err != nil { - klog.Errorf("failed to get kubernetes version, %v", err) + klog.Errorf("could not get kubernetes version, %v", err) return nil, err } data.kubernetesVersion = k8sVersion @@ -400,7 +400,7 @@ func newJoinData(args []string, opt *joinOptions) (*joinData, error) { yurthubManifest, yurthubTemplate, err := yurtadmutil.GetStaticPodTemplateFromConfigMap(client, opt.namespace, util.WithConfigMapPrefix(yurthubYurtStaticSetName)) if err != nil { - klog.Errorf("hard-code yurthub manifest will be used, because failed to get yurthub template from kube-apiserver, %v", err) + klog.Errorf("hard-code yurthub manifest will be used, because could not get yurthub template from kube-apiserver, %v", err) yurthubManifest = yurtconstants.YurthubStaticPodManifest yurthubTemplate = yurtconstants.YurthubTemplate diff --git a/pkg/yurtadm/cmd/renew/certificate/certificate.go b/pkg/yurtadm/cmd/renew/certificate/certificate.go index 172f7d1face..c4e262cd75a 100644 --- a/pkg/yurtadm/cmd/renew/certificate/certificate.go +++ b/pkg/yurtadm/cmd/renew/certificate/certificate.go @@ -119,7 +119,7 @@ func parseRemoteServers(serverAddr string) ([]*url.URL, error) { for _, server := range servers { u, err := url.Parse(server) if err != nil { - klog.Errorf("failed to parse server address %s, %v", servers, err) + klog.Errorf("could not parse server address %s, %v", servers, err) return us, err } if u.Scheme == "" { diff --git a/pkg/yurtadm/util/edgenode/edgenode.go b/pkg/yurtadm/util/edgenode/edgenode.go index 26304306913..e57934d3222 100644 --- a/pkg/yurtadm/util/edgenode/edgenode.go +++ b/pkg/yurtadm/util/edgenode/edgenode.go @@ -62,7 +62,7 @@ func GetContentFormFile(filename string, regularExpression string) ([]string, er func GetSingleContentFromFile(filename string, regularExpression string) (string, error) { contents, err := GetContentFormFile(filename, regularExpression) if err != nil { - return "", fmt.Errorf("failed to read file %s, %w", filename, err) + return "", fmt.Errorf("could not read file %s, %w", filename, err) } if contents == nil { return "", fmt.Errorf("no matching string %s in file %s", regularExpression, filename) @@ -87,11 +87,11 @@ func EnsureDir(dirname string) error { func CopyFile(sourceFile string, destinationFile string, perm os.FileMode) error { content, err := os.ReadFile(sourceFile) if err != nil { - return fmt.Errorf("failed to read source file %s: %w", sourceFile, err) + return fmt.Errorf("could not read source file %s: %w", sourceFile, err) } err = os.WriteFile(destinationFile, content, perm) if err != nil { - return fmt.Errorf("failed to write destination file %s: %w", destinationFile, err) + return fmt.Errorf("could not write destination file %s: %w", 
destinationFile, err) } return nil } diff --git a/pkg/yurtadm/util/initsystem/initsystem_unix.go b/pkg/yurtadm/util/initsystem/initsystem_unix.go index 4acf4dca454..efbab4c5f5a 100644 --- a/pkg/yurtadm/util/initsystem/initsystem_unix.go +++ b/pkg/yurtadm/util/initsystem/initsystem_unix.go @@ -58,7 +58,7 @@ type SystemdInitSystem struct{} // reloadSystemd reloads the systemd daemon func (sysd SystemdInitSystem) reloadSystemd() error { if err := exec.Command("systemctl", "daemon-reload").Run(); err != nil { - return errors.Wrap(err, "failed to reload systemd") + return errors.Wrap(err, "could not reload systemd") } return nil } diff --git a/pkg/yurtadm/util/kubernetes/kubernetes.go b/pkg/yurtadm/util/kubernetes/kubernetes.go index 472c89b33d6..587ec3cace5 100644 --- a/pkg/yurtadm/util/kubernetes/kubernetes.go +++ b/pkg/yurtadm/util/kubernetes/kubernetes.go @@ -88,7 +88,7 @@ func RunJobAndCleanup(cliSet *kubernetes.Clientset, job *batchv1.Job, timeout, p } if waitForTimeout { - klog.Infof("continue to wait for job(%s) to complete until timeout, even if failed to get job, %v", job.GetName(), err) + klog.Infof("continue to wait for job(%s) to complete until timeout, even if the job could not be fetched, %v", job.GetName(), err) continue } return err @@ -99,7 +99,7 @@ func RunJobAndCleanup(cliSet *kubernetes.Clientset, job *batchv1.Job, timeout, p Delete(context.Background(), job.GetName(), metav1.DeleteOptions{ PropagationPolicy: &PropagationPolicy, }); err != nil { - klog.Errorf("fail to delete succeeded servant job(%s): %s", job.GetName(), err) + klog.Errorf("could not delete succeeded servant job(%s): %s", job.GetName(), err) return err } return nil @@ -339,7 +339,7 @@ func GetKubernetesVersionFromCluster(client kubernetes.Interface) (string, error // Also, the config map really should be KubeadmConfigConfigMap... 
configMap, err := apiclient.GetConfigMapWithRetry(client, metav1.NamespaceSystem, constants.KubeadmConfigConfigMap) if err != nil { - return kubernetesVersion, pkgerrors.Wrap(err, "failed to get config map") + return kubernetesVersion, pkgerrors.Wrap(err, "could not get config map") } // gets ClusterConfiguration from kubeadm-config @@ -363,7 +363,7 @@ func GetKubernetesVersionFromCluster(client kubernetes.Interface) (string, error } if len(kubernetesVersion) == 0 { - return kubernetesVersion, errors.New("failed to get Kubernetes version") + return kubernetesVersion, errors.New("could not get Kubernetes version") } klog.Infof("kubernetes version: %s", kubernetesVersion) @@ -516,7 +516,7 @@ func GetStaticPodTemplateFromConfigMap(client kubernetes.Interface, namespace, n namespace, name) if err != nil { - return "", "", pkgerrors.Errorf("failed to get configmap of %s/%s yurtstaticset, err: %+v", namespace, name, err) + return "", "", pkgerrors.Errorf("could not get configmap of %s/%s yurtstaticset, err: %+v", namespace, name, err) } if len(configMap.Data) == 1 { @@ -537,7 +537,7 @@ func GetDefaultClientSet() (*kubernetes.Clientset, error) { cfg, err := clientcmd.BuildConfigFromFlags("", kubeConfig) if err != nil { - return nil, fmt.Errorf("fail to create the clientset based on %s: %w", kubeConfig, err) + return nil, fmt.Errorf("could not create the clientset based on %s: %w", kubeConfig, err) } cliSet, err := kubernetes.NewForConfig(cfg) if err != nil { diff --git a/pkg/yurthub/cachemanager/cache_agent.go b/pkg/yurthub/cachemanager/cache_agent.go index e8bcbb3c497..82fd9bb07a3 100644 --- a/pkg/yurthub/cachemanager/cache_agent.go +++ b/pkg/yurthub/cachemanager/cache_agent.go @@ -131,7 +131,7 @@ func (ca *CacheAgent) deleteAgentCache(deletedAgents sets.String) { components := deletedAgents.List() for i := range components { if err := ca.store.DeleteComponentResources(components[i]); err != nil { - klog.Errorf("failed to cleanup cache for deleted agent(%s), %v", components[i], err) + klog.Errorf("could not cleanup cache for deleted agent(%s), %v", components[i], err) } else { klog.Infof("cleanup cache for agent(%s) successfully", components[i]) } diff --git a/pkg/yurthub/cachemanager/cache_manager.go b/pkg/yurthub/cachemanager/cache_manager.go index 3e11f00db47..bcb071dae46 100644 --- a/pkg/yurthub/cachemanager/cache_manager.go +++ b/pkg/yurthub/cachemanager/cache_manager.go @@ -103,11 +103,11 @@ func (cm *cacheManager) CacheResponse(req *http.Request, prc io.ReadCloser, stop var buf bytes.Buffer n, err := buf.ReadFrom(prc) if err != nil { - klog.Errorf("failed to cache response, %v", err) + klog.Errorf("could not cache response, %v", err) return err } else if n == 0 { err := fmt.Errorf("read 0-length data from response, %s", util.ReqInfoString(info)) - klog.Errorf("failed to cache response, %v", err) + klog.Errorf("could not cache response, %v", err) return err } else { klog.V(5).Infof("cache %d bytes from response for %s", n, util.ReqInfoString(info)) @@ -125,10 +125,10 @@ func (cm *cacheManager) QueryCache(req *http.Request) (runtime.Object, error) { ctx := req.Context() info, ok := apirequest.RequestInfoFrom(ctx) if !ok || info == nil || info.Resource == "" { - return nil, fmt.Errorf("failed to get request info for request %s", util.ReqString(req)) + return nil, fmt.Errorf("could not get request info for request %s", util.ReqString(req)) } if !info.IsResourceRequest { - return nil, fmt.Errorf("failed to QueryCache for getting non-resource request %s", util.ReqString(req)) + return nil, 
fmt.Errorf("could not QueryCache for getting non-resource request %s", util.ReqString(req)) } switch info.Verb { @@ -137,7 +137,7 @@ func (cm *cacheManager) QueryCache(req *http.Request) (runtime.Object, error) { case "get", "patch", "update": return cm.queryOneObject(req) default: - return nil, fmt.Errorf("failed to QueryCache, unsupported verb %s of request %s", info.Verb, util.ReqString(req)) + return nil, fmt.Errorf("could not QueryCache, unsupported verb %s of request %s", info.Verb, util.ReqString(req)) } } @@ -165,17 +165,17 @@ func (cm *cacheManager) queryListObject(req *http.Request) (runtime.Object, erro Resource: info.Resource, }) if err != nil { - klog.Errorf("failed to get gvk for ListObject for req: %s, %v", util.ReqString(req), err) + klog.Errorf("could not get gvk for ListObject for req: %s, %v", util.ReqString(req), err) // If err is hubmeta.ErrGVRNotRecognized, the reverse proxy will set the HTTP Status Code as 404. return nil, err } listObj, err := generateEmptyListObjOfGVK(listGvk) if err != nil { - klog.Errorf("failed to create ListObj for gvk %s for req: %s, %v", listGvk.String(), util.ReqString(req), err) + klog.Errorf("could not create ListObj for gvk %s for req: %s, %v", listGvk.String(), util.ReqString(req), err) return nil, err } if err := setListObjSelfLink(listObj, req); err != nil { - klog.Errorf("failed to set selfLink for ListObj of gvk %s for req: %s, %v", listGvk.String(), util.ReqString(req), err) + klog.Errorf("could not set selfLink for ListObj of gvk %s for req: %s, %v", listGvk.String(), util.ReqString(req), err) return nil, err } @@ -185,7 +185,7 @@ func (cm *cacheManager) queryListObject(req *http.Request) (runtime.Object, erro // when the specified resource is not found return an empty list object, to keep same as APIServer. 
return listObj, nil } else if err != nil { - klog.Errorf("failed to list key %s for request %s, %v", key.Key(), util.ReqString(req), err) + klog.Errorf("could not list key %s for request %s, %v", key.Key(), util.ReqString(req), err) return nil, err } else if len(objs) == 0 { if isKubeletPodRequest(req) { @@ -208,12 +208,12 @@ func (cm *cacheManager) queryListObject(req *http.Request) (runtime.Object, erro klog.Warningf("The restMapper's kind(%v) and object's kind(%v) are inconsistent ", listGvk.Kind, gotObjListKind) listGvk.Kind = gotObjListKind if listObj, err = generateEmptyListObjOfGVK(listGvk); err != nil { - klog.Errorf("failed to create list obj for req: %s, whose gvk is %s, %v", util.ReqString(req), listGvk.String(), err) + klog.Errorf("could not create list obj for req: %s, whose gvk is %s, %v", util.ReqString(req), listGvk.String(), err) return nil, err } } if err := completeListObjWithObjs(listObj, objs); err != nil { - klog.Errorf("failed to complete the list obj %s for req %s, %v", listGvk, util.ReqString(req), err) + klog.Errorf("could not complete the list obj %s for req %s, %v", listGvk, util.ReqString(req), err) return nil, err } return listObj, nil @@ -223,7 +223,7 @@ func (cm *cacheManager) queryOneObject(req *http.Request) (runtime.Object, error ctx := req.Context() info, ok := apirequest.RequestInfoFrom(ctx) if !ok || info == nil || info.Resource == "" { - return nil, fmt.Errorf("failed to get request info for request %s", util.ReqString(req)) + return nil, fmt.Errorf("could not get request info for request %s", util.ReqString(req)) } comp, _ := util.ClientComponentFrom(ctx) @@ -260,7 +260,7 @@ func (cm *cacheManager) queryOneObject(req *http.Request) (runtime.Object, error klog.V(4).Infof("component: %s try to get key: %s", comp, key.Key()) obj, err := cm.storage.Get(key) if err != nil { - klog.Errorf("failed to get obj %s from storage, %v", key.Key(), err) + klog.Errorf("could not get obj %s from storage, %v", key.Key(), err) return nil, err } // When yurthub restart, the data stored in in-memory cache will lose, @@ -313,7 +313,7 @@ func completeListObjWithObjs(listObj runtime.Object, objs []runtime.Object) erro } if err := meta.SetList(listObj, objs); err != nil { - return fmt.Errorf("failed to meta set list with %d objects, %v", len(objs), err) + return fmt.Errorf("could not meta set list with %d objects, %v", len(objs), err) } return accessor.SetResourceVersion(listObj, strconv.Itoa(listRv)) @@ -325,7 +325,7 @@ func generateEmptyListObjOfGVK(listGvk schema.GroupVersionKind) (runtime.Object, if scheme.Scheme.Recognizes(listGvk) { listObj, err = scheme.Scheme.New(listGvk) if err != nil { - return nil, fmt.Errorf("failed to create list object(%v), %v", listGvk, err) + return nil, fmt.Errorf("could not create list object(%v), %v", listGvk, err) } } else { listObj = new(unstructured.UnstructuredList) @@ -371,8 +371,8 @@ func (cm *cacheManager) saveWatchObject(ctx context.Context, info *apirequest.Re respContentType, _ := util.RespContentTypeFrom(ctx) s := cm.serializerManager.CreateSerializer(respContentType, info.APIGroup, info.APIVersion, info.Resource) if s == nil { - klog.Errorf("failed to create serializer in saveWatchObject, %s", util.ReqInfoString(info)) - return fmt.Errorf("failed to create serializer in saveWatchObject, %s", util.ReqInfoString(info)) + klog.Errorf("could not create serializer in saveWatchObject, %s", util.ReqInfoString(info)) + return fmt.Errorf("could not create serializer in saveWatchObject, %s", util.ReqInfoString(info)) } accessor := 
meta.NewAccessor() @@ -397,13 +397,13 @@ func (cm *cacheManager) saveWatchObject(ctx context.Context, info *apirequest.Re case watch.Added, watch.Modified, watch.Deleted: name, err := accessor.Name(obj) if err != nil || name == "" { - klog.Errorf("failed to get name of watch object, %v", err) + klog.Errorf("could not get name of watch object, %v", err) continue } ns, err := accessor.Namespace(obj) if err != nil { - klog.Errorf("failed to get namespace of watch object, %v", err) + klog.Errorf("could not get namespace of watch object, %v", err) continue } @@ -416,7 +416,7 @@ func (cm *cacheManager) saveWatchObject(ctx context.Context, info *apirequest.Re Version: info.APIVersion, }) if err != nil { - klog.Errorf("failed to get cache path, %v", err) + klog.Errorf("could not get cache path, %v", err) continue } @@ -440,7 +440,7 @@ func (cm *cacheManager) saveWatchObject(ctx context.Context, info *apirequest.Re } if err != nil { - klog.Errorf("failed to process watch object %s, %v", key.Key(), err) + klog.Errorf("could not process watch object %s, %v", key.Key(), err) } case watch.Bookmark: rv, _ := accessor.ResourceVersion(obj) @@ -456,13 +456,13 @@ func (cm *cacheManager) saveListObject(ctx context.Context, info *apirequest.Req respContentType, _ := util.RespContentTypeFrom(ctx) s := cm.serializerManager.CreateSerializer(respContentType, info.APIGroup, info.APIVersion, info.Resource) if s == nil { - klog.Errorf("failed to create serializer in saveListObject, %s", util.ReqInfoString(info)) - return fmt.Errorf("failed to create serializer in saveListObject, %s", util.ReqInfoString(info)) + klog.Errorf("could not create serializer in saveListObject, %s", util.ReqInfoString(info)) + return fmt.Errorf("could not create serializer in saveListObject, %s", util.ReqInfoString(info)) } list, err := s.Decode(b) if err != nil || list == nil { - klog.Errorf("failed to decode response %s in saveListObject, response content type: %s, requestInfo: %s, %v", + klog.Errorf("could not decode response %s in saveListObject, response content type: %s, requestInfo: %s, %v", string(b), respContentType, util.ReqInfoString(info), err) return err } @@ -488,7 +488,7 @@ func (cm *cacheManager) saveListObject(ctx context.Context, info *apirequest.Req // Verify if DynamicRESTMapper(which store the CRD info) needs to be updated if err := cm.restMapperManager.UpdateKind(schema.GroupVersionKind{Group: info.APIGroup, Version: info.APIVersion, Kind: kind}); err != nil { - klog.Errorf("failed to update the DynamicRESTMapper %v", err) + klog.Errorf("could not update the DynamicRESTMapper %v", err) } if info.Name != "" && len(items) == 1 { @@ -547,16 +547,16 @@ func (cm *cacheManager) saveOneObject(ctx context.Context, info *apirequest.Requ s := cm.serializerManager.CreateSerializer(respContentType, info.APIGroup, info.APIVersion, info.Resource) if s == nil { - klog.Errorf("failed to create serializer in saveOneObject, %s", util.ReqInfoString(info)) - return fmt.Errorf("failed to create serializer in saveOneObject, %s", util.ReqInfoString(info)) + klog.Errorf("could not create serializer in saveOneObject, %s", util.ReqInfoString(info)) + return fmt.Errorf("could not create serializer in saveOneObject, %s", util.ReqInfoString(info)) } obj, err := s.Decode(b) if err != nil { - klog.Errorf("failed to decode response %s in saveOneObject(respContentType:%s): %s, %v", string(b), respContentType, util.ReqInfoString(info), err) + klog.Errorf("could not decode response %s in saveOneObject(respContentType:%s): %s, %v", string(b), 
respContentType, util.ReqInfoString(info), err) return err } else if obj == nil { - klog.Info("failed to decode nil object. skip cache") + klog.Info("could not decode nil object. skip cache") return nil } else if _, ok := obj.(*metav1.Status); ok { klog.Infof("it's not need to cache metav1.Status.") @@ -585,19 +585,19 @@ func (cm *cacheManager) saveOneObject(ctx context.Context, info *apirequest.Requ Version: info.APIVersion, }) if err != nil { - klog.Errorf("failed to get cache key(%s:%s:%s:%s), %v", comp, info.Resource, info.Namespace, info.Name, err) + klog.Errorf("could not get cache key(%s:%s:%s:%s), %v", comp, info.Resource, info.Namespace, info.Name, err) return err } // Verify if DynamicRESTMapper(which store the CRD info) needs to be updated gvk := obj.GetObjectKind().GroupVersionKind() if err := cm.restMapperManager.UpdateKind(gvk); err != nil { - klog.Errorf("failed to update the DynamicRESTMapper %v", err) + klog.Errorf("could not update the DynamicRESTMapper %v", err) } err = cm.storeObjectWithKey(key, obj) if err != nil { - klog.Errorf("failed to store object %s, %v", key.Key(), err) + klog.Errorf("could not store object %s, %v", key.Key(), err) return err } @@ -627,7 +627,7 @@ func (cm *cacheManager) storeObjectWithKey(key storage.Key, obj runtime.Object) newRv, err := accessor.ResourceVersion(obj) if err != nil { - return fmt.Errorf("failed to get new object resource version for %s, %v", key, err) + return fmt.Errorf("could not get new object resource version for %s, %v", key, err) } klog.V(4).Infof("try to store obj of key %s, obj: %v", key.Key(), obj) @@ -644,13 +644,13 @@ func (cm *cacheManager) storeObjectWithKey(key storage.Key, obj runtime.Object) klog.V(2).Infof("skip to cache obj because key(%s) is under processing", key) return nil } - return fmt.Errorf("failed to create obj of key: %s, %v", key.Key(), err) + return fmt.Errorf("could not create obj of key: %s, %v", key.Key(), err) } case storage.ErrStorageAccessConflict: klog.V(2).Infof("skip to cache watch event because key(%s) is under processing", key) return nil default: - return fmt.Errorf("failed to store obj with rv %s of key: %s, %v", newRv, key.Key(), err) + return fmt.Errorf("could not store obj with rv %s of key: %s, %v", newRv, key.Key(), err) } return nil } diff --git a/pkg/yurthub/cachemanager/storage_wrapper.go b/pkg/yurthub/cachemanager/storage_wrapper.go index f9c8db77e7e..be29e3c0426 100644 --- a/pkg/yurthub/cachemanager/storage_wrapper.go +++ b/pkg/yurthub/cachemanager/storage_wrapper.go @@ -83,7 +83,7 @@ func (sw *storageWrapper) Create(key storage.Key, obj runtime.Object) error { var buf bytes.Buffer if obj != nil { if err := sw.backendSerializer.Encode(obj, &buf); err != nil { - klog.Errorf("failed to encode object in create for %s, %v", key.Key(), err) + klog.Errorf("could not encode object in create for %s, %v", key.Key(), err) return err } } @@ -175,7 +175,7 @@ func (sw *storageWrapper) List(key storage.Key) ([]runtime.Object, error) { func (sw *storageWrapper) Update(key storage.Key, obj runtime.Object, rv uint64) (runtime.Object, error) { var buf bytes.Buffer if err := sw.backendSerializer.Encode(obj, &buf); err != nil { - klog.Errorf("failed to encode object in update for %s, %v", key.Key(), err) + klog.Errorf("could not encode object in update for %s, %v", key.Key(), err) return nil, err } @@ -183,7 +183,7 @@ func (sw *storageWrapper) Update(key storage.Key, obj runtime.Object, rv uint64) if err == storage.ErrUpdateConflict { obj, _, dErr := sw.backendSerializer.Decode(buf, nil, nil) if 
dErr != nil { - return nil, fmt.Errorf("failed to decode existing obj of key %s, %v", key.Key(), dErr) + return nil, fmt.Errorf("could not decode existing obj of key %s, %v", key.Key(), dErr) } return obj, err } @@ -198,7 +198,7 @@ func (sw *storageWrapper) ReplaceComponentList(component string, gvr schema.Grou contents := make(map[storage.Key][]byte, len(objs)) for key, obj := range objs { if err := sw.backendSerializer.Encode(obj, &buf); err != nil { - klog.Errorf("failed to encode object in update for %s, %v", key.Key(), err) + klog.Errorf("could not encode object in update for %s, %v", key.Key(), err) return err } contents[key] = make([]byte, len(buf.Bytes())) diff --git a/pkg/yurthub/certificate/kubeletcertificate/kubelet_certificate.go b/pkg/yurthub/certificate/kubeletcertificate/kubelet_certificate.go index c3a7b332e02..0db01ee19b4 100644 --- a/pkg/yurthub/certificate/kubeletcertificate/kubelet_certificate.go +++ b/pkg/yurthub/certificate/kubeletcertificate/kubelet_certificate.go @@ -96,7 +96,7 @@ func (kcm *kubeletCertManager) GetAPIServerClientCert() *tls.Certificate { klog.Warningf("current certificate: %s is expired, reload it", kcm.kubeletPemFile) cert, err := loadFile(kcm.kubeletPemFile) if err != nil { - klog.Errorf("failed to load client certificate(%s), %v", kcm.kubeletPemFile, err) + klog.Errorf("could not load client certificate(%s), %v", kcm.kubeletPemFile, err) return nil } kcm.cert = cert diff --git a/pkg/yurthub/certificate/testdata/fake_client.go b/pkg/yurthub/certificate/testdata/fake_client.go index 96e664b582a..6c18e121803 100644 --- a/pkg/yurthub/certificate/testdata/fake_client.go +++ b/pkg/yurthub/certificate/testdata/fake_client.go @@ -310,7 +310,7 @@ func (ca *CertificateAuthority) Sign(crDER []byte, policy SigningPolicy) ([]byte der, err := x509.CreateCertificate(rand.Reader, tmpl, ca.Certificate, cr.PublicKey, ca.PrivateKey) if err != nil { - return nil, fmt.Errorf("failed to sign certificate: %v", err) + return nil, fmt.Errorf("could not sign certificate: %v", err) } return der, nil } diff --git a/pkg/yurthub/certificate/token/token.go b/pkg/yurthub/certificate/token/token.go index 459470e5b41..8a9f49bba4a 100644 --- a/pkg/yurthub/certificate/token/token.go +++ b/pkg/yurthub/certificate/token/token.go @@ -149,7 +149,7 @@ func (ycm *yurtHubClientCertManager) verifyServerAddrOrCleanup(servers []*url.UR func (ycm *yurtHubClientCertManager) Start() { err := ycm.prepareConfigAndCaFile() if err != nil { - klog.Errorf("failed to prepare config and ca file, %v", err) + klog.Errorf("could not prepare config and ca file, %v", err) return } @@ -171,7 +171,7 @@ func (ycm *yurtHubClientCertManager) prepareConfigAndCaFile() error { if len(ycm.bootstrapFile) != 0 { // 1. 
load bootstrap config if tlsBootstrapCfg, err = clientcmd.LoadFromFile(ycm.getBootstrapConfFile()); err != nil { - klog.Errorf("maybe hub agent restarted, failed to load bootstrap config file(%s), %v.", ycm.getBootstrapConfFile(), err) + klog.Errorf("maybe hub agent restarted, could not load bootstrap config file(%s), %v.", ycm.getBootstrapConfFile(), err) } else { klog.V(2).Infof("%s file is configured, just use it", ycm.getBootstrapConfFile()) } @@ -220,7 +220,7 @@ func (ycm *yurtHubClientCertManager) prepareConfigAndCaFile() error { return errors.Wrap(err, "couldn't stat bootstrap config file") } else if !exist { if tlsBootstrapCfg, err = ycm.retrieveHubBootstrapConfig(ycm.joinToken); err != nil { - return errors.Wrap(err, "failed to retrieve bootstrap config") + return errors.Wrap(err, "could not retrieve bootstrap config") } } else { klog.V(2).Infof("%s file already exists, so reuse it", ycm.getBootstrapConfFile()) diff --git a/pkg/yurthub/filter/filter.go b/pkg/yurthub/filter/filter.go index 9700fb4a3d6..9a4e5133958 100644 --- a/pkg/yurthub/filter/filter.go +++ b/pkg/yurthub/filter/filter.go @@ -142,7 +142,7 @@ func newFilterReadCloser( respContentType, _ := util.RespContentTypeFrom(ctx) s := CreateSerializer(respContentType, info, sm) if s == nil { - klog.Errorf("skip filter, failed to create serializer in %s", ownerName) + klog.Errorf("skip filter, could not create serializer in %s", ownerName) return 0, rc, nil } @@ -210,7 +210,7 @@ func (frc *filterReadCloser) ObjectResponseFilter(rc io.ReadCloser) (*bytes.Buff } obj, err := frc.serializer.Decode(buf.Bytes()) if err != nil || obj == nil { - klog.Errorf("skip filter, failed to decode response in HandleObjectResponse of %s %v", frc.ownerName, err) + klog.Errorf("skip filter, could not decode response in HandleObjectResponse of %s %v", frc.ownerName, err) return &buf, nil } @@ -228,7 +228,7 @@ func (frc *filterReadCloser) StreamResponseFilter(rc io.ReadCloser, ch chan *byt d, err := frc.serializer.WatchDecoder(rc) if err != nil { - klog.Errorf("failed to get watch decoder in StreamResponseFilter of %s, %v", frc.ownerName, err) + klog.Errorf("could not get watch decoder in StreamResponseFilter of %s, %v", frc.ownerName, err) return err } @@ -251,7 +251,7 @@ func (frc *filterReadCloser) StreamResponseFilter(rc io.ReadCloser, ch chan *byt buf := &bytes.Buffer{} _, err = frc.serializer.WatchEncode(buf, &wEvent) if err != nil { - klog.Errorf("failed to encode resource in StreamResponseFilter of %s, %v", frc.ownerName, err) + klog.Errorf("could not encode resource in StreamResponseFilter of %s, %v", frc.ownerName, err) return err } ch <- buf diff --git a/pkg/yurthub/filter/nodeportisolation/filter.go b/pkg/yurthub/filter/nodeportisolation/filter.go index 69c2a028020..9b443125562 100644 --- a/pkg/yurthub/filter/nodeportisolation/filter.go +++ b/pkg/yurthub/filter/nodeportisolation/filter.go @@ -127,7 +127,7 @@ func (nif *nodePortIsolationFilter) resolveNodePoolName() string { node, err := nif.client.CoreV1().Nodes().Get(context.Background(), nif.nodeName, metav1.GetOptions{}) if err != nil { - klog.Warningf("skip isolateNodePortService filter, failed to get node(%s), %v", nif.nodeName, err) + klog.Warningf("skip isolateNodePortService filter, could not get node(%s), %v", nif.nodeName, err) return nif.nodePoolName } nif.nodePoolName = node.Labels[apps.NodePoolLabel] diff --git a/pkg/yurthub/filter/servicetopology/filter.go b/pkg/yurthub/filter/servicetopology/filter.go index 4e4cf7af76a..31bbf72cdc5 100644 --- 
a/pkg/yurthub/filter/servicetopology/filter.go +++ b/pkg/yurthub/filter/servicetopology/filter.go @@ -119,7 +119,7 @@ func (stf *serviceTopologyFilter) resolveNodePoolName() string { node, err := stf.client.CoreV1().Nodes().Get(context.Background(), stf.nodeName, metav1.GetOptions{}) if err != nil { - klog.Warningf("failed to get node(%s) in serviceTopologyFilter filter, %v", stf.nodeName, err) + klog.Warningf("could not get node(%s) in serviceTopologyFilter filter, %v", stf.nodeName, err) return stf.nodePoolName } stf.nodePoolName = node.Labels[apps.NodePoolLabel] @@ -200,7 +200,7 @@ func (stf *serviceTopologyFilter) resolveServiceTopologyType(obj runtime.Object) svc, err := stf.serviceLister.Services(svcNamespace).Get(svcName) if err != nil { - klog.Warningf("serviceTopologyFilterHandler: failed to get service %s/%s, err: %v", svcNamespace, svcName, err) + klog.Warningf("serviceTopologyFilterHandler: could not get service %s/%s, err: %v", svcNamespace, svcName, err) return false, "" } @@ -232,7 +232,7 @@ func (stf *serviceTopologyFilter) nodePoolTopologyHandler(obj runtime.Object) ru runtimeObj, err := stf.nodePoolLister.Get(nodePoolName) if err != nil { - klog.Warningf("serviceTopologyFilterHandler: failed to get nodepool %s, err: %v", nodePoolName, err) + klog.Warningf("serviceTopologyFilterHandler: could not get nodepool %s, err: %v", nodePoolName, err) return obj } var nodePool *v1beta1.NodePool diff --git a/pkg/yurthub/gc/gc.go b/pkg/yurthub/gc/gc.go index 344d2287a79..b4d57229880 100644 --- a/pkg/yurthub/gc/gc.go +++ b/pkg/yurthub/gc/gc.go @@ -98,7 +98,7 @@ func (m *GCManager) gcPodsWhenRestart() { Resource: "pods", }) if err != nil { - klog.Errorf("failed to list keys for kubelet pods, %v", err) + klog.Errorf("could not list keys for kubelet pods, %v", err) return } else if len(localPodKeys) == 0 { klog.Infof("local storage for kubelet pods is empty, not need to gc pods") @@ -138,7 +138,7 @@ func (m *GCManager) gcPodsWhenRestart() { Resources: "pods", }) if err != nil { - klog.Errorf("failed to get pod key for %s/%s, %v", ns, name, err) + klog.Errorf("could not get pod key for %s/%s, %v", ns, name, err) continue } currentPodKeys[key] = struct{}{} @@ -159,7 +159,7 @@ func (m *GCManager) gcPodsWhenRestart() { for _, key := range deletedPods { if err := m.store.Delete(key); err != nil { - klog.Errorf("failed to gc pod %s, %v", key, err) + klog.Errorf("could not gc pod %s, %v", key, err) } else { klog.Infof("gc pod %s successfully", key) } @@ -205,7 +205,7 @@ func (m *GCManager) gcEvents(kubeClient clientset.Interface, component string) { for _, key := range deletedEvents { if err := m.store.Delete(key); err != nil { - klog.Errorf("failed to gc events %s, %v", key.Key(), err) + klog.Errorf("could not gc events %s, %v", key.Key(), err) } else { klog.Infof("gc events %s successfully", key.Key()) } diff --git a/pkg/yurthub/healthchecker/health_checker.go b/pkg/yurthub/healthchecker/health_checker.go index a70a5699309..0af768d25fb 100644 --- a/pkg/yurthub/healthchecker/health_checker.go +++ b/pkg/yurthub/healthchecker/health_checker.go @@ -227,20 +227,20 @@ func (hc *cloudAPIServerHealthChecker) setLastNodeLease(lease *coordinationv1.Le Version: "v1", }) if err != nil { - return fmt.Errorf("failed to get key for lease %s/%s, %v", lease.Namespace, lease.Name, err) + return fmt.Errorf("could not get key for lease %s/%s, %v", lease.Namespace, lease.Name, err) } rv, err := strconv.ParseUint(lease.ResourceVersion, 10, 64) if err != nil { - return fmt.Errorf("failed to convert rv string %s of 
lease %s/%s, %v", lease.ResourceVersion, lease.Namespace, lease.Name, err) + return fmt.Errorf("could not convert rv string %s of lease %s/%s, %v", lease.ResourceVersion, lease.Namespace, lease.Name, err) } _, err = hc.sw.Update(leaseKey, lease, rv) if err == storage.ErrStorageNotFound { klog.Infof("find no lease of %s in storage, init a new one", leaseKey.Key()) if err := hc.sw.Create(leaseKey, lease); err != nil { - return fmt.Errorf("failed to create the lease %s, %v", leaseKey.Key(), err) + return fmt.Errorf("could not create the lease %s, %v", leaseKey.Key(), err) } } else if err != nil { - return fmt.Errorf("failed to update lease %s/%s, %v", lease.Namespace, lease.Name, err) + return fmt.Errorf("could not update lease %s/%s, %v", lease.Namespace, lease.Name, err) } return nil } diff --git a/pkg/yurthub/healthchecker/node_lease.go b/pkg/yurthub/healthchecker/node_lease.go index d77361f1bb4..cd48ba0b222 100644 --- a/pkg/yurthub/healthchecker/node_lease.go +++ b/pkg/yurthub/healthchecker/node_lease.go @@ -162,7 +162,7 @@ func (nl *nodeLeaseImpl) newLease(base *coordinationv1.Lease) *coordinationv1.Le }, } } else { - klog.Errorf("failed to get node %q when trying to set owner ref to the node lease: %v", nl.holderIdentity, err) + klog.Errorf("could not get node %q when trying to set owner ref to the node lease: %v", nl.holderIdentity, err) } } return lease diff --git a/pkg/yurthub/healthchecker/prober.go b/pkg/yurthub/healthchecker/prober.go index 0ef8d5fdd73..51f14828ac6 100644 --- a/pkg/yurthub/healthchecker/prober.go +++ b/pkg/yurthub/healthchecker/prober.go @@ -88,13 +88,13 @@ func (p *prober) Probe(phase string) bool { lease, err := p.nodeLease.Update(baseLease) if err == nil { if err := p.setLastNodeLease(lease); err != nil { - klog.Errorf("failed to store last node lease: %v", err) + klog.Errorf("could not store last node lease: %v", err) } p.markAsHealthy(phase) return true } - klog.Errorf("failed to probe: %v, remote server %s", err, p.ServerName()) + klog.Errorf("could not probe: %v, remote server %s", err, p.ServerName()) p.markAsUnhealthy(phase) return false } diff --git a/pkg/yurthub/kubernetes/meta/restmapper.go b/pkg/yurthub/kubernetes/meta/restmapper.go index 7a2bf8102ed..7c41731015b 100644 --- a/pkg/yurthub/kubernetes/meta/restmapper.go +++ b/pkg/yurthub/kubernetes/meta/restmapper.go @@ -86,11 +86,11 @@ func NewRESTMapperManager(baseDir string) (*RESTMapperManager, error) { dm = make(map[schema.GroupVersionResource]schema.GroupVersionKind) err = storage.CreateFile(filepath.Join(baseDir, CacheDynamicRESTMapperKey), []byte{}) if err != nil { - return nil, fmt.Errorf("failed to init dynamic RESTMapper file at %s, %v", cachedFilePath, err) + return nil, fmt.Errorf("could not init dynamic RESTMapper file at %s, %v", cachedFilePath, err) } klog.Infof("initialize an empty DynamicRESTMapper") } else if err != nil { - return nil, fmt.Errorf("failed to read existing RESTMapper file at %s, %v", cachedFilePath, err) + return nil, fmt.Errorf("could not read existing RESTMapper file at %s, %v", cachedFilePath, err) } if len(b) != 0 { @@ -159,7 +159,7 @@ func (rm *RESTMapperManager) updateCachedDynamicRESTMapper() error { } err = rm.storage.Write(rm.cachedFilePath, d) if err != nil { - return fmt.Errorf("failed to update cached dynamic RESTMapper, %v", err) + return fmt.Errorf("could not update cached dynamic RESTMapper, %v", err) } return nil } @@ -230,7 +230,7 @@ func unmarshalDynamicRESTMapper(data []byte) (map[schema.GroupVersionResource]sc cacheMapper := make(map[string]string) 
err := json.Unmarshal(data, &cacheMapper) if err != nil { - return nil, fmt.Errorf("failed to get cached CRDRESTMapper, %v", err) + return nil, fmt.Errorf("could not get cached CRDRESTMapper, %v", err) } for gvrString, kindString := range cacheMapper { diff --git a/pkg/yurthub/network/iptables.go b/pkg/yurthub/network/iptables.go index d21f2da84f3..07a84a6f70b 100644 --- a/pkg/yurthub/network/iptables.go +++ b/pkg/yurthub/network/iptables.go @@ -83,7 +83,7 @@ func (im *IptablesManager) CleanUpIptablesRules() error { err := im.iptables.DeleteRule(rule.table, rule.chain, rule.args...) if err != nil { errs = append(errs, err) - klog.Errorf("failed to delete iptables rule(%s -t %s %s %s), %v", rule.pos, rule.table, rule.chain, strings.Join(rule.args, " "), err) + klog.Errorf("could not delete iptables rule(%s -t %s %s %s), %v", rule.pos, rule.table, rule.chain, strings.Join(rule.args, " "), err) } } return utilerrors.NewAggregate(errs) diff --git a/pkg/yurthub/network/network.go b/pkg/yurthub/network/network.go index 779d593620e..737f83a2911 100644 --- a/pkg/yurthub/network/network.go +++ b/pkg/yurthub/network/network.go @@ -66,12 +66,12 @@ func (m *NetworkManager) Run(stopCh <-chan struct{}) { klog.Infof("exit network manager run goroutine normally") if m.enableIptables { if err := m.iptablesManager.CleanUpIptablesRules(); err != nil { - klog.Errorf("failed to cleanup iptables, %v", err) + klog.Errorf("could not cleanup iptables, %v", err) } } err := m.ifController.DeleteDummyInterface(m.dummyIfName) if err != nil { - klog.Errorf("failed to delete dummy interface %s, %v", m.dummyIfName, err) + klog.Errorf("could not delete dummy interface %s, %v", m.dummyIfName, err) } else { klog.Infof("remove dummy interface %s successfully", m.dummyIfName) } @@ -79,7 +79,7 @@ func (m *NetworkManager) Run(stopCh <-chan struct{}) { case <-ticker.C: if err := m.configureNetwork(); err != nil { // do nothing here - klog.Warningf("failed to configure network, %v", err) + klog.Warningf("could not configure network, %v", err) } } } diff --git a/pkg/yurthub/proxy/local/faketoken.go b/pkg/yurthub/proxy/local/faketoken.go index 893f6cdbb4a..2ccbd2bb64a 100644 --- a/pkg/yurthub/proxy/local/faketoken.go +++ b/pkg/yurthub/proxy/local/faketoken.go @@ -53,14 +53,14 @@ func WithFakeTokenInject(handler http.Handler, serializerManager *serializer.Ser s := createSerializer(req, tokenRequestGVR, serializerManager) if s == nil { - klog.Errorf("skip fake token inject for request %s when cluster is unhealthy, failed to create serializer.", util.ReqString(req)) + klog.Errorf("skip fake token inject for request %s when cluster is unhealthy, could not create serializer.", util.ReqString(req)) writeRequestDirectly(w, req, buf.Bytes(), n) return } tokenRequset, err := getTokenRequestWithFakeToken(buf.Bytes(), info, req, s) if err != nil { - klog.Errorf("skip fake token inject for request %s when cluster is unhealthy, failed to get token request: %v", util.ReqString(req), err) + klog.Errorf("skip fake token inject for request %s when cluster is unhealthy, could not get token request: %v", util.ReqString(req), err) writeRequestDirectly(w, req, buf.Bytes(), n) return } diff --git a/pkg/yurthub/proxy/local/local.go b/pkg/yurthub/proxy/local/local.go index 8e1304b3bf9..22cf43c7718 100644 --- a/pkg/yurthub/proxy/local/local.go +++ b/pkg/yurthub/proxy/local/local.go @@ -225,7 +225,7 @@ func (lp *LocalProxy) localReqCache(w http.ResponseWriter, req *http.Request) er reqInfo, _ := apirequest.RequestInfoFrom(req.Context()) return 
apierrors.NewNotFound(schema.GroupResource{Group: reqInfo.APIGroup, Resource: reqInfo.Resource}, reqInfo.Name) } else if err != nil { - klog.Errorf("failed to query cache for %s, %v", hubutil.ReqString(req), err) + klog.Errorf("could not query cache for %s, %v", hubutil.ReqString(req), err) return apierrors.NewInternalError(err) } else if obj == nil { klog.Errorf("no cache object for %s", hubutil.ReqString(req)) diff --git a/pkg/yurthub/proxy/pool/pool.go b/pkg/yurthub/proxy/pool/pool.go index 8e6de82a7ae..bebf7523213 100644 --- a/pkg/yurthub/proxy/pool/pool.go +++ b/pkg/yurthub/proxy/pool/pool.go @@ -86,7 +86,7 @@ func NewYurtCoordinatorProxy( transportMgr, stopCh) if err != nil { - klog.Errorf("failed to create remote proxy for yurt-coordinator, %v", err) + klog.Errorf("could not create remote proxy for yurt-coordinator, %v", err) return } @@ -238,7 +238,7 @@ func (pp *YurtCoordinatorProxy) modifyResponse(resp *http.Response) error { wrapBody, needUncompressed := hubutil.NewGZipReaderCloser(resp.Header, resp.Body, req, "filter") size, filterRc, err := responseFilter.Filter(req, wrapBody, pp.stopCh) if err != nil { - klog.Errorf("failed to filter response for %s, %v", hubutil.ReqString(req), err) + klog.Errorf("could not filter response for %s, %v", hubutil.ReqString(req), err) return err } resp.Body = filterRc @@ -270,7 +270,7 @@ func (pp *YurtCoordinatorProxy) cacheResponse(req *http.Request, resp *http.Resp rc, prc := hubutil.NewDualReadCloser(req, wrapPrc, true) go func(req *http.Request, prc io.ReadCloser, stopCh <-chan struct{}) { if err := pp.localCacheMgr.CacheResponse(req, prc, stopCh); err != nil { - klog.Errorf("pool proxy failed to cache req %s in local cache, %v", hubutil.ReqString(req), err) + klog.Errorf("pool proxy could not cache req %s in local cache, %v", hubutil.ReqString(req), err) } }(req, prc, ctx.Done()) diff --git a/pkg/yurthub/proxy/proxy.go b/pkg/yurthub/proxy/proxy.go index 5a6a5e0c4c8..58dbfa9362b 100644 --- a/pkg/yurthub/proxy/proxy.go +++ b/pkg/yurthub/proxy/proxy.go @@ -276,7 +276,7 @@ func (p *yurtReverseProxy) subjectAccessReviewHandler(rw http.ResponseWriter, re func isSubjectAccessReviewFromYurtCoordinator(req *http.Request) bool { var buf bytes.Buffer if n, err := buf.ReadFrom(req.Body); err != nil || n == 0 { - klog.Errorf("failed to read SubjectAccessReview from request %s, read %d bytes, %v", hubutil.ReqString(req), n, err) + klog.Errorf("could not read SubjectAccessReview from request %s, read %d bytes, %v", hubutil.ReqString(req), n, err) return false } req.Body = io.NopCloser(&buf) @@ -289,7 +289,7 @@ func isSubjectAccessReviewFromYurtCoordinator(req *http.Request) bool { obj := &v1.SubjectAccessReview{} got, gvk, err := decoder.Decode(buf.Bytes(), nil, obj) if err != nil { - klog.Errorf("failed to decode SubjectAccessReview in request %s, %v", hubutil.ReqString(req), err) + klog.Errorf("could not decode SubjectAccessReview in request %s, %v", hubutil.ReqString(req), err) return false } if (*gvk) != subjectAccessReviewGVK { diff --git a/pkg/yurthub/proxy/remote/loadbalancer.go b/pkg/yurthub/proxy/remote/loadbalancer.go index a8aeda01ea2..08228b13a7e 100644 --- a/pkg/yurthub/proxy/remote/loadbalancer.go +++ b/pkg/yurthub/proxy/remote/loadbalancer.go @@ -292,7 +292,7 @@ func (lb *loadBalancer) modifyResponse(resp *http.Response) error { wrapBody, needUncompressed := hubutil.NewGZipReaderCloser(resp.Header, resp.Body, req, "filter") size, filterRc, err := responseFilter.Filter(req, wrapBody, lb.stopCh) if err != nil { - klog.Errorf("failed to filter 
response for %s, %v", hubutil.ReqString(req), err) + klog.Errorf("could not filter response for %s, %v", hubutil.ReqString(req), err) return err } resp.Body = filterRc @@ -370,7 +370,7 @@ func (lb *loadBalancer) cacheResponse(req *http.Request, resp *http.Response) { // node does not need. lb.cacheToPool(req, resp, poolCacheManager) } else { - klog.Errorf("failed to cache response for request %s, leader yurthub does not cache non-poolscoped resources.", hubutil.ReqString(req)) + klog.Errorf("could not cache response for request %s, leader yurthub does not cache non-poolscoped resources.", hubutil.ReqString(req)) } } return @@ -388,7 +388,7 @@ func (lb *loadBalancer) cacheToLocal(req *http.Request, resp *http.Response) { rc, prc := hubutil.NewDualReadCloser(req, resp.Body, true) go func(req *http.Request, prc io.ReadCloser, stopCh <-chan struct{}) { if err := lb.localCacheMgr.CacheResponse(req, prc, stopCh); err != nil { - klog.Errorf("lb failed to cache req %s in local cache, %v", hubutil.ReqString(req), err) + klog.Errorf("lb could not cache req %s in local cache, %v", hubutil.ReqString(req), err) } }(req, prc, ctx.Done()) resp.Body = rc @@ -400,7 +400,7 @@ func (lb *loadBalancer) cacheToPool(req *http.Request, resp *http.Response, pool rc, prc := hubutil.NewDualReadCloser(req, resp.Body, true) go func(req *http.Request, prc io.ReadCloser, stopCh <-chan struct{}) { if err := poolCacheManager.CacheResponse(req, prc, stopCh); err != nil { - klog.Errorf("lb failed to cache req %s in pool cache, %v", hubutil.ReqString(req), err) + klog.Errorf("lb could not cache req %s in pool cache, %v", hubutil.ReqString(req), err) } }(req, prc, ctx.Done()) resp.Body = rc @@ -412,14 +412,14 @@ func (lb *loadBalancer) cacheToLocalAndPool(req *http.Request, resp *http.Respon rc, prc1, prc2 := hubutil.NewTripleReadCloser(req, resp.Body, true) go func(req *http.Request, prc io.ReadCloser, stopCh <-chan struct{}) { if err := lb.localCacheMgr.CacheResponse(req, prc, stopCh); err != nil { - klog.Errorf("lb failed to cache req %s in local cache, %v", hubutil.ReqString(req), err) + klog.Errorf("lb could not cache req %s in local cache, %v", hubutil.ReqString(req), err) } }(req, prc1, ctx.Done()) if poolCacheMgr != nil { go func(req *http.Request, prc io.ReadCloser, stopCh <-chan struct{}) { if err := poolCacheMgr.CacheResponse(req, prc, stopCh); err != nil { - klog.Errorf("lb failed to cache req %s in pool cache, %v", hubutil.ReqString(req), err) + klog.Errorf("lb could not cache req %s in pool cache, %v", hubutil.ReqString(req), err) } }(req, prc2, ctx.Done()) } diff --git a/pkg/yurthub/proxy/util/util.go b/pkg/yurthub/proxy/util/util.go index a6c4b48e028..0d319dcd845 100644 --- a/pkg/yurthub/proxy/util/util.go +++ b/pkg/yurthub/proxy/util/util.go @@ -318,7 +318,7 @@ func WithRequestTimeout(handler http.Handler) http.Handler { if info.Verb == "list" || info.Verb == "watch" { opts := metainternalversion.ListOptions{} if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil { - klog.Errorf("failed to decode parameter for list/watch request: %s", util.ReqString(req)) + klog.Errorf("could not decode parameter for list/watch request: %s", util.ReqString(req)) Err(errors.NewBadRequest(err.Error()), w, req) return } @@ -372,7 +372,7 @@ func WithSaTokenSubstitute(handler http.Handler, tenantMgr tenant.Interface) htt } } else { - klog.Errorf("failed to parse tenant ns from token, token %s, sub: %s", oldToken, oldClaim.Subject) + klog.Errorf("could not 
parse tenant ns from token, token %s, sub: %s", oldToken, oldClaim.Subject) } } } diff --git a/pkg/yurthub/server/nonresource.go b/pkg/yurthub/server/nonresource.go index 78c81a53c01..417c1b8c4af 100644 --- a/pkg/yurthub/server/nonresource.go +++ b/pkg/yurthub/server/nonresource.go @@ -111,7 +111,7 @@ func nonResourceHandler(kubeClient *kubernetes.Clientset, sw cachemanager.Storag } func writeErrResponse(path string, err error, w http.ResponseWriter) { - klog.Errorf("failed to handle %s non resource request, %v", path, err) + klog.Errorf("could not handle %s non resource request, %v", path, err) status := responsewriters.ErrorToAPIStatus(err) output, err := json.Marshal(status) if err != nil { diff --git a/pkg/yurthub/storage/disk/storage.go b/pkg/yurthub/storage/disk/storage.go index b052f6d73d5..25d32d988a5 100644 --- a/pkg/yurthub/storage/disk/storage.go +++ b/pkg/yurthub/storage/disk/storage.go @@ -64,7 +64,7 @@ func NewDiskStorage(dir string) (storage.Store, error) { fsOperator := &fs.FileSystemOperator{} if err := fsOperator.CreateDir(dir); err != nil && err != fs.ErrExists { - return nil, fmt.Errorf("failed to create cache path %s, %v", dir, err) + return nil, fmt.Errorf("could not create cache path %s, %v", dir, err) } // prune suffix "/" of dir @@ -128,7 +128,7 @@ func (ds *diskStorage) Create(key storage.Key, content []byte) error { return storage.ErrKeyExists } if err != nil { - return fmt.Errorf("failed to create file %s, %v", path, err) + return fmt.Errorf("could not create file %s, %v", path, err) } return nil } @@ -151,7 +151,7 @@ func (ds *diskStorage) Delete(key storage.Key) error { return ds.fsOperator.DeleteDir(path) } if err := ds.fsOperator.DeleteFile(path); err != nil { - return fmt.Errorf("failed to delete file %s, %v", path, err) + return fmt.Errorf("could not delete file %s, %v", path, err) } return nil @@ -180,7 +180,7 @@ func (ds *diskStorage) Get(key storage.Key) ([]byte, error) { case fs.ErrIsNotFile: return nil, storage.ErrKeyHasNoContent default: - return buf, fmt.Errorf("failed to read file at %s, %v", path, err) + return buf, fmt.Errorf("could not read file at %s, %v", path, err) } } @@ -206,7 +206,7 @@ func (ds *diskStorage) List(key storage.Key) ([][]byte, error) { for _, filePath := range files { buf, err := ds.fsOperator.Read(filePath) if err != nil { - return nil, fmt.Errorf("failed to read file at %s, %v", filePath, err) + return nil, fmt.Errorf("could not read file at %s, %v", filePath, err) } bb = append(bb, buf) } @@ -216,14 +216,14 @@ func (ds *diskStorage) List(key storage.Key) ([][]byte, error) { case fs.ErrIsNotDir: // possibly it is a regular file, try to read it directly if buf, rerr := ds.fsOperator.Read(absPath); rerr != nil { - return nil, fmt.Errorf("failed to list file at %s, %v", absPath, rerr) + return nil, fmt.Errorf("could not list file at %s, %v", absPath, rerr) } else { bb = append(bb, buf) } return bb, nil default: // err != nil - return nil, fmt.Errorf("failed to get all files under %s, %v", absPath, err) + return nil, fmt.Errorf("could not get all files under %s, %v", absPath, err) } } @@ -253,13 +253,13 @@ func (ds *diskStorage) Update(key storage.Key, content []byte, rv uint64) ([]byt return nil, storage.ErrStorageNotFound } if err != nil { - return nil, fmt.Errorf("failed to read file at %s, %v", absPath, err) + return nil, fmt.Errorf("could not read file at %s, %v", absPath, err) } klog.V(4).Infof("find key %s exists when updating it", storageKey.Key()) ok, err := ds.ifFresherThan(old, rv) if err != nil { - return nil, 
fmt.Errorf("failed to get rv of file %s, %v", absPath, err) + return nil, fmt.Errorf("could not get rv of file %s, %v", absPath, err) } if !ok { return old, storage.ErrUpdateConflict @@ -268,14 +268,14 @@ func (ds *diskStorage) Update(key storage.Key, content []byte, rv uint64) ([]byt // update the file tmpPath := filepath.Join(ds.baseDir, getTmpKey(storageKey).Key()) if err := ds.fsOperator.Rename(absPath, tmpPath); err != nil { - return nil, fmt.Errorf("failed to backup file %s, %v", absPath, err) + return nil, fmt.Errorf("could not backup file %s, %v", absPath, err) } if err := ds.fsOperator.CreateFile(absPath, content); err != nil { // We can ensure that the file actually exists, so it should not be ErrNotExists - return nil, fmt.Errorf("failed to write to file %s, %v", absPath, err) + return nil, fmt.Errorf("could not write to file %s, %v", absPath, err) } if err := ds.fsOperator.DeleteFile(tmpPath); err != nil { - return nil, fmt.Errorf("failed to delete backup file %s, %v", tmpPath, err) + return nil, fmt.Errorf("could not delete backup file %s, %v", tmpPath, err) } return content, nil } @@ -305,7 +305,7 @@ func (ds *diskStorage) ListResourceKeysOfComponent(component string, gvr schema. return nil, storage.ErrStorageNotFound } if err != nil { - return nil, fmt.Errorf("failed to list files at %s, %v", filepath.Join(ds.baseDir, storageKey.Key()), err) + return nil, fmt.Errorf("could not list files at %s, %v", filepath.Join(ds.baseDir, storageKey.Key()), err) } keys := make([]storage.Key, len(files)) @@ -364,7 +364,7 @@ func (ds *diskStorage) ReplaceComponentList(component string, gvr schema.GroupVe tmpPath := filepath.Join(ds.baseDir, tmpRootKey.Key()) if !fs.IfExists(absPath) { if err := ds.fsOperator.CreateDir(absPath); err != nil { - return fmt.Errorf("failed to create dir at %s", absPath) + return fmt.Errorf("could not create dir at %s", absPath) } if len(contents) == 0 { // nothing need to create, so just return @@ -374,7 +374,7 @@ func (ds *diskStorage) ReplaceComponentList(component string, gvr schema.GroupVe if ok, err := fs.IsDir(absPath); err == nil && !ok { return fmt.Errorf("%s is not a dir", absPath) } else if err != nil { - return fmt.Errorf("failed to check the path %s, %v", absPath, err) + return fmt.Errorf("could not check the path %s, %v", absPath, err) } // absPath exists and is a dir if err := ds.fsOperator.Rename(absPath, tmpPath); err != nil { @@ -386,11 +386,11 @@ func (ds *diskStorage) ReplaceComponentList(component string, gvr schema.GroupVe for key, data := range contents { path := filepath.Join(ds.baseDir, key.Key()) if err := ds.fsOperator.CreateDir(filepath.Dir(path)); err != nil && err != fs.ErrExists { - klog.Errorf("failed to create dir at %s, %v", filepath.Dir(path), err) + klog.Errorf("could not create dir at %s, %v", filepath.Dir(path), err) continue } if err := ds.fsOperator.CreateFile(path, data); err != nil { - klog.Errorf("failed to write data to %s, %v", path, err) + klog.Errorf("could not write data to %s, %v", path, err) continue } klog.V(4).Infof("[diskStorage] ReplaceComponentList store data at %s", path) @@ -416,7 +416,7 @@ func (ds *diskStorage) DeleteComponentResources(component string) error { absKey := filepath.Join(ds.baseDir, rootKey.Key()) if err := ds.fsOperator.DeleteDir(absKey); err != nil { - return fmt.Errorf("failed to delete path %s, %v", absKey, err) + return fmt.Errorf("could not delete path %s, %v", absKey, err) } return nil } @@ -437,11 +437,11 @@ func (ds *diskStorage) SaveClusterInfo(key storage.ClusterInfoKey, content 
[]byt if err == fs.ErrExists { // file exists, overwrite it with content if werr := ds.fsOperator.Write(path, content); werr != nil { - return fmt.Errorf("failed to update clusterInfo %s at path %s, %v", key.ClusterInfoType, path, werr) + return fmt.Errorf("could not update clusterInfo %s at path %s, %v", key.ClusterInfoType, path, werr) } return nil } - return fmt.Errorf("failed to create %s clusterInfo file at path %s, %v", key.ClusterInfoType, path, err) + return fmt.Errorf("could not create %s clusterInfo file at path %s, %v", key.ClusterInfoType, path, err) } return nil } @@ -464,7 +464,7 @@ func (ds *diskStorage) GetClusterInfo(key storage.ClusterInfoKey) ([]byte, error if err == fs.ErrNotExists { return nil, storage.ErrStorageNotFound } - return nil, fmt.Errorf("failed to read %s clusterInfo file at %s, %v", key.ClusterInfoType, path, err) + return nil, fmt.Errorf("could not read %s clusterInfo file at %s, %v", key.ClusterInfoType, path, err) } return buf, nil } @@ -492,12 +492,12 @@ func (ds *diskStorage) Recover() error { switch { case info.Mode().IsDir(): if err := ds.recoverDir(path); err != nil { - return fmt.Errorf("failed to recover dir %s, %v", path, err) + return fmt.Errorf("could not recover dir %s, %v", path, err) } recoveredDir[path] = struct{}{} case info.Mode().IsRegular(): if err := ds.recoverFile(path); err != nil { - return fmt.Errorf("failed to recover file %s, %v", path, err) + return fmt.Errorf("could not recover file %s, %v", path, err) } default: klog.Warningf("unrecognized file %s when recovering diskStorage", path) @@ -523,7 +523,7 @@ func (ds *diskStorage) recoverFile(tmpPath string) error { return fmt.Errorf("failed at origin path %s, isRegularFile: %v, error: %v", path, ok, err) } if err := ds.fsOperator.DeleteFile(path); err != nil { - return fmt.Errorf("failed to delete file at %s, %v", path, err) + return fmt.Errorf("could not delete file at %s, %v", path, err) } } if err := ds.fsOperator.Rename(tmpPath, path); err != nil { @@ -545,7 +545,7 @@ func (ds *diskStorage) recoverDir(tmpPath string) error { return fmt.Errorf("failed at origin path %s, isDir: %v, error: %v", path, ok, err) } if err := ds.fsOperator.DeleteDir(path); err != nil { - return fmt.Errorf("failed to delete dir at %s, %v", path, err) + return fmt.Errorf("could not delete dir at %s, %v", path, err) } } if err := ds.fsOperator.Rename(tmpPath, path); err != nil { @@ -585,11 +585,11 @@ func (ds *diskStorage) ifFresherThan(oldObj []byte, newRV uint64) (bool, error) unstructuredObj := &unstructured.Unstructured{} curObj, _, err := ds.serializer.Decode(oldObj, nil, unstructuredObj) if err != nil { - return false, fmt.Errorf("failed to decode obj, %v", err) + return false, fmt.Errorf("could not decode obj, %v", err) } curRv, err := ObjectResourceVersion(curObj) if err != nil { - return false, fmt.Errorf("failed to get rv of obj, %v", err) + return false, fmt.Errorf("could not get rv of obj, %v", err) } if newRV < curRv { return false, nil @@ -606,7 +606,7 @@ func (ds *diskStorage) unLockKey(key storageKey) { func ifEnhancement(baseDir string, fsOperator fs.FileSystemOperator) (bool, error) { compDirs, err := fsOperator.List(baseDir, fs.ListModeDirs, false) if err != nil { - return false, fmt.Errorf("failed to list dirs under %s, %v", baseDir, err) + return false, fmt.Errorf("could not list dirs under %s, %v", baseDir, err) } for _, compDir := range compDirs { @@ -618,7 +618,7 @@ func ifEnhancement(baseDir string, fsOperator fs.FileSystemOperator) (bool, erro resDirs, err := 
fsOperator.List(compDir, fs.ListModeDirs, false) if err != nil { - return false, fmt.Errorf("failed to list dirs under %s, %v", compDir, err) + return false, fmt.Errorf("could not list dirs under %s, %v", compDir, err) } for _, resDir := range resDirs { diff --git a/pkg/yurthub/storage/etcd/keycache.go b/pkg/yurthub/storage/etcd/keycache.go index ac14ee727c1..7d7970d03f6 100644 --- a/pkg/yurthub/storage/etcd/keycache.go +++ b/pkg/yurthub/storage/etcd/keycache.go @@ -79,24 +79,24 @@ func (c *componentKeyCache) Recover() error { var err error if buf, err = c.fsOperator.Read(c.filePath); err == fs.ErrNotExists { if err := c.fsOperator.CreateFile(c.filePath, []byte{}); err != nil { - return fmt.Errorf("failed to create cache file at %s, %v", c.filePath, err) + return fmt.Errorf("could not create cache file at %s, %v", c.filePath, err) } } else if err != nil { - return fmt.Errorf("failed to recover key cache from %s, %v", c.filePath, err) + return fmt.Errorf("could not recover key cache from %s, %v", c.filePath, err) } if len(buf) != 0 { // We've got content from file cache, err := unmarshal(buf) if err != nil { - return fmt.Errorf("failed to parse file content at %s, %v", c.filePath, err) + return fmt.Errorf("could not parse file content at %s, %v", c.filePath, err) } c.cache = cache } poolScopedKeyset, err := c.getPoolScopedKeyset() if err != nil { - return fmt.Errorf("failed to get pool-scoped keys, %v", err) + return fmt.Errorf("could not get pool-scoped keys, %v", err) } // Overwrite the data we recovered from local disk, if any. Because we // only respect to the resources stored in yurt-coordinator to recover the @@ -121,17 +121,17 @@ func (c *componentKeyCache) getPoolScopedKeyset() (*keyCache, error) { Resources: gvr.Resource, }) if err != nil { - return nil, fmt.Errorf("failed to generate keys for %s, %v", gvr.String(), err) + return nil, fmt.Errorf("could not generate keys for %s, %v", gvr.String(), err) } getResp, err := getFunc(rootKey.Key()) if err != nil { - return nil, fmt.Errorf("failed to get from etcd for %s, %v", gvr.String(), err) + return nil, fmt.Errorf("could not get from etcd for %s, %v", gvr.String(), err) } for _, kv := range getResp.Kvs { ns, name, err := getNamespaceAndNameFromKeyPath(string(kv.Key)) if err != nil { - return nil, fmt.Errorf("failed to parse namespace and name of %s", kv.Key) + return nil, fmt.Errorf("could not parse namespace and name of %s", kv.Key) } key, err := c.keyFunc(storage.KeyBuildInfo{ Component: coordinatorconstants.DefaultPoolScopedUserAgent, @@ -142,7 +142,7 @@ func (c *componentKeyCache) getPoolScopedKeyset() (*keyCache, error) { Name: name, }) if err != nil { - return nil, fmt.Errorf("failed to create resource key for %v", kv.Key) + return nil, fmt.Errorf("could not create resource key for %v", kv.Key) } if _, ok := keys.m[gvr]; !ok { @@ -258,7 +258,7 @@ func (c *componentKeyCache) LoadAndDelete(component string) (keyCache, bool) { func (c *componentKeyCache) flush() error { buf := marshal(c.cache) if err := c.fsOperator.Write(c.filePath, buf); err != nil { - return fmt.Errorf("failed to flush cache to file %s, %v", c.filePath, err) + return fmt.Errorf("could not flush cache to file %s, %v", c.filePath, err) } return nil } @@ -299,7 +299,7 @@ func unmarshal(buf []byte) (map[string]keyCache, error) { for i, l := range lines { s := strings.Split(l, "#") if len(s) != 2 { - return nil, fmt.Errorf("failed to parse line %d, invalid format", i) + return nil, fmt.Errorf("could not parse line %d, invalid format", i) } comp := s[0] @@ -309,11 
+309,11 @@ func unmarshal(buf []byte) (map[string]keyCache, error) { for _, gvrKey := range gvrKeys { ss := strings.Split(gvrKey, ":") if len(ss) != 2 { - return nil, fmt.Errorf("failed to parse gvr keys %s at line %d, invalid format", gvrKey, i) + return nil, fmt.Errorf("could not parse gvr keys %s at line %d, invalid format", gvrKey, i) } gvrStrs := strings.Split(ss[0], "_") if len(gvrStrs) != 3 { - return nil, fmt.Errorf("failed to parse gvr %s at line %d, invalid format", ss[0], i) + return nil, fmt.Errorf("could not parse gvr %s at line %d, invalid format", ss[0], i) } gvr := schema.GroupVersionResource{ Group: gvrStrs[0], diff --git a/pkg/yurthub/storage/etcd/storage.go b/pkg/yurthub/storage/etcd/storage.go index 39553a8708b..2164f18a4b3 100644 --- a/pkg/yurthub/storage/etcd/storage.go +++ b/pkg/yurthub/storage/etcd/storage.go @@ -104,7 +104,7 @@ func NewStorage(ctx context.Context, cfg *EtcdStorageConfig) (storage.Store, err tlsConfig, err = tlsInfo.ClientConfig() if err != nil { - return nil, fmt.Errorf("failed to create tls config for etcd client, %v", err) + return nil, fmt.Errorf("could not create tls config for etcd client, %v", err) } } @@ -118,7 +118,7 @@ func NewStorage(ctx context.Context, cfg *EtcdStorageConfig) (storage.Store, err client, err := clientv3.New(clientConfig) if err != nil { - return nil, fmt.Errorf("failed to create etcd client, %v", err) + return nil, fmt.Errorf("could not create etcd client, %v", err) } s := &etcdStorage{ @@ -142,9 +142,9 @@ func NewStorage(ctx context.Context, cfg *EtcdStorageConfig) (storage.Store, err } if err := cache.Recover(); err != nil { if err := client.Close(); err != nil { - return nil, fmt.Errorf("failed to close etcd client, %v", err) + return nil, fmt.Errorf("could not close etcd client, %v", err) } - return nil, fmt.Errorf("failed to recover component key cache from %s, %v", cacheFilePath, err) + return nil, fmt.Errorf("could not recover component key cache from %s, %v", cacheFilePath, err) } s.localComponentKeyCache = cache @@ -172,7 +172,7 @@ func (s *etcdStorage) clientLifeCycleManagement() { if client, err := clientv3.New(s.clientConfig); err == nil { klog.Infof("client reconnected to etcd server, %s", client.ActiveConnection().GetState().String()) if err := s.client.Close(); err != nil { - klog.Errorf("failed to close old client, %v", err) + klog.Errorf("could not close old client, %v", err) } s.client = client return @@ -186,7 +186,7 @@ func (s *etcdStorage) clientLifeCycleManagement() { select { case <-s.ctx.Done(): if err := s.client.Close(); err != nil { - klog.Errorf("failed to close etcd client, %v", err) + klog.Errorf("could not close etcd client, %v", err) } klog.Info("etcdstorage lifecycle routine exited") return @@ -215,7 +215,7 @@ func (s *etcdStorage) Create(key storage.Key, content []byte) error { keyStr := key.Key() originRv, err := getRvOfObject(content) if err != nil { - return fmt.Errorf("failed to get rv from content when creating %s, %v", keyStr, err) + return fmt.Errorf("could not get rv from content when creating %s, %v", keyStr, err) } ctx, cancel := context.WithTimeout(s.ctx, defaultTimeout) @@ -403,7 +403,7 @@ func (s *etcdStorage) ReplaceComponentList(component string, gvr schema.GroupVer for k := range addedOrUpdated { rv, err := getRvOfObject(contents[k]) if err != nil { - klog.Errorf("failed to process %s in list object, %v", k.Key(), err) + klog.Errorf("could not process %s in list object, %v", k.Key(), err) continue } createOrUpdateOp := clientv3.OpTxn( diff --git 
a/pkg/yurthub/tenant/tenant.go b/pkg/yurthub/tenant/tenant.go index 477a6207480..d5863b0ebd3 100644 --- a/pkg/yurthub/tenant/tenant.go +++ b/pkg/yurthub/tenant/tenant.go @@ -113,7 +113,7 @@ func (mgr *tenantManager) addSecret(sec interface{}) { secret, ok := sec.(*v1.Secret) if !ok { - klog.Errorf("failed to convert to *v1.Secret") + klog.Errorf("could not convert to *v1.Secret") return } @@ -129,7 +129,7 @@ func (mgr *tenantManager) addSecret(sec interface{}) { func (mgr *tenantManager) deleteSecret(sec interface{}) { secret, ok := sec.(*v1.Secret) if !ok { - klog.Errorf("failed to convert to *v1.Secret") + klog.Errorf("could not convert to *v1.Secret") return } @@ -144,7 +144,7 @@ func (mgr *tenantManager) updateSecret(oldSec interface{}, newSec interface{}) { secret, ok := newSec.(*v1.Secret) if !ok { - klog.Errorf("failed to convert to *v1.Secret") + klog.Errorf("could not convert to *v1.Secret") return } diff --git a/pkg/yurthub/util/connrotation.go b/pkg/yurthub/util/connrotation.go index c2aeea55d2f..fadc486b807 100644 --- a/pkg/yurthub/util/connrotation.go +++ b/pkg/yurthub/util/connrotation.go @@ -128,7 +128,7 @@ func (d *Dialer) DialContext(ctx context.Context, network, address string) (net. d.mu.Lock() size := len(d.addrConns[address]) d.mu.Unlock() - klog.Infof("%s dialer failed to dial: %v, and total connections: %d", d.name, err, size) + klog.Infof("%s dialer could not dial: %v, and total connections: %d", d.name, err, size) } return nil, err } diff --git a/pkg/yurthub/util/fs/store.go b/pkg/yurthub/util/fs/store.go index bec2da6364f..16b339ba73d 100644 --- a/pkg/yurthub/util/fs/store.go +++ b/pkg/yurthub/util/fs/store.go @@ -109,7 +109,7 @@ func (fs *FileSystemOperator) List(rootDir string, mode ListMode, isRecursive bo info, err := d.Info() if err != nil { - return fmt.Errorf("failed to get info for entry %s, %v", path, err) + return fmt.Errorf("could not get info for entry %s, %v", path, err) } switch { diff --git a/pkg/yurthub/util/util.go b/pkg/yurthub/util/util.go index a291d7ef376..65e63c547c2 100644 --- a/pkg/yurthub/util/util.go +++ b/pkg/yurthub/util/util.go @@ -236,11 +236,11 @@ func (dr *tripleReadCloser) Read(p []byte) (n int, err error) { var n1, n2 int var err error if n1, err = dr.pw1.Write(p[:n]); err != nil { - klog.Errorf("tripleReader: failed to write to pw1 %v", err) + klog.Errorf("tripleReader: could not write to pw1 %v", err) return n1, err } if n2, err = dr.pw2.Write(p[:n]); err != nil { - klog.Errorf("tripleReader: failed to write to pw2 %v", err) + klog.Errorf("tripleReader: could not write to pw2 %v", err) return n2, err } } @@ -266,7 +266,7 @@ func (dr *tripleReadCloser) Close() error { } if len(errs) != 0 { - return fmt.Errorf("failed to close dualReader, %v", errs) + return fmt.Errorf("could not close dualReader, %v", errs) } return nil @@ -312,7 +312,7 @@ func (dr *dualReadCloser) Read(p []byte) (n int, err error) { n, err = dr.rc.Read(p) if n > 0 { if n, err := dr.pw.Write(p[:n]); err != nil { - klog.Errorf("dualReader: failed to write %v", err) + klog.Errorf("dualReader: could not write %v", err) return n, err } } @@ -334,7 +334,7 @@ func (dr *dualReadCloser) Close() error { } if len(errs) != 0 { - return fmt.Errorf("failed to close dualReader, %v", errs) + return fmt.Errorf("could not close dualReader, %v", errs) } return nil diff --git a/pkg/yurthub/yurtcoordinator/certmanager/certmanager.go b/pkg/yurthub/yurtcoordinator/certmanager/certmanager.go index 144f620da60..6e914ef80d2 100644 --- 
a/pkg/yurthub/yurtcoordinator/certmanager/certmanager.go +++ b/pkg/yurthub/yurtcoordinator/certmanager/certmanager.go @@ -57,7 +57,7 @@ var certFileNames = map[CertFileType]string{ func NewCertManager(pkiDir, yurtHubNs string, yurtClient kubernetes.Interface, informerFactory informers.SharedInformerFactory) (*CertManager, error) { store := fs.FileSystemOperator{} if err := store.CreateDir(pkiDir); err != nil && err != fs.ErrExists { - return nil, fmt.Errorf("failed to create dir %s, %v", pkiDir, err) + return nil, fmt.Errorf("could not create dir %s, %v", pkiDir, err) } certMgr := &CertManager{ @@ -139,7 +139,7 @@ func (c *CertManager) updateCerts(secret *corev1.Secret) { var coordinatorCert, nodeLeaseProxyCert *tls.Certificate if cook { if cert, err := tls.X509KeyPair(coordinatorClientCrt, coordinatorClientKey); err != nil { - klog.Errorf("failed to create tls certificate for coordinator, %v", err) + klog.Errorf("could not create tls certificate for coordinator, %v", err) } else { coordinatorCert = &cert } @@ -147,7 +147,7 @@ func (c *CertManager) updateCerts(secret *corev1.Secret) { if nook { if cert, err := tls.X509KeyPair(nodeLeaseProxyClientCrt, nodeLeaseProxyClientKey); err != nil { - klog.Errorf("failed to create tls certificate for node lease proxy, %v", err) + klog.Errorf("could not create tls certificate for node lease proxy, %v", err) } else { nodeLeaseProxyCert = &cert } @@ -160,27 +160,27 @@ func (c *CertManager) updateCerts(secret *corev1.Secret) { if caok { klog.Infof("updating coordinator ca cert") if err := c.createOrUpdateFile(c.GetFilePath(RootCA), ca); err != nil { - klog.Errorf("failed to update ca, %v", err) + klog.Errorf("could not update ca, %v", err) } } if cook { klog.Infof("updating yurt-coordinator-yurthub client cert and key") if err := c.createOrUpdateFile(c.GetFilePath(YurthubClientKey), coordinatorClientKey); err != nil { - klog.Errorf("failed to update coordinator client key, %v", err) + klog.Errorf("could not update coordinator client key, %v", err) } if err := c.createOrUpdateFile(c.GetFilePath(YurthubClientCert), coordinatorClientCrt); err != nil { - klog.Errorf("failed to update coordinator client cert, %v", err) + klog.Errorf("could not update coordinator client cert, %v", err) } } if nook { klog.Infof("updating node-lease-proxy-client cert and key") if err := c.createOrUpdateFile(c.GetFilePath(NodeLeaseProxyClientKey), nodeLeaseProxyClientKey); err != nil { - klog.Errorf("failed to update node lease proxy client key, %v", err) + klog.Errorf("could not update node lease proxy client key, %v", err) } if err := c.createOrUpdateFile(c.GetFilePath(NodeLeaseProxyClientCert), nodeLeaseProxyClientCrt); err != nil { - klog.Errorf("failed to update node lease proxy client cert, %v", err) + klog.Errorf("could not update node lease proxy client cert, %v", err) } } @@ -199,10 +199,10 @@ func (c *CertManager) deleteCerts() { func (c *CertManager) createOrUpdateFile(path string, data []byte) error { if err := c.store.Write(path, data); err == fs.ErrNotExists { if err := c.store.CreateFile(path, data); err != nil { - return fmt.Errorf("failed to create file at %s, %v", path, err) + return fmt.Errorf("could not create file at %s, %v", path, err) } } else if err != nil { - return fmt.Errorf("failed to update file at %s, %v", path, err) + return fmt.Errorf("could not update file at %s, %v", path, err) } return nil } diff --git a/pkg/yurthub/yurtcoordinator/coordinator.go b/pkg/yurthub/yurtcoordinator/coordinator.go index 707c0f5bcc6..591660d1225 100644 --- 
a/pkg/yurthub/yurtcoordinator/coordinator.go +++ b/pkg/yurthub/yurtcoordinator/coordinator.go @@ -148,7 +148,7 @@ func NewCoordinator( } coordinatorClient, err := kubernetes.NewForConfig(coordinatorRESTCfg) if err != nil { - return nil, fmt.Errorf("failed to create client for yurt coordinator, %v", err) + return nil, fmt.Errorf("could not create client for yurt coordinator, %v", err) } coordinator := &coordinator{ @@ -188,7 +188,7 @@ func NewCoordinator( proxiedClient, err := buildProxiedClientWithUserAgent(fmt.Sprintf("http://%s", cfg.YurtHubProxyServerAddr), constants.DefaultPoolScopedUserAgent) if err != nil { - return nil, fmt.Errorf("failed to create proxied client, %v", err) + return nil, fmt.Errorf("could not create proxied client, %v", err) } // init pool scope resources @@ -196,7 +196,7 @@ func NewCoordinator( dynamicClient, err := buildDynamicClientWithUserAgent(fmt.Sprintf("http://%s", cfg.YurtHubProxyServerAddr), constants.DefaultPoolScopedUserAgent) if err != nil { - return nil, fmt.Errorf("failed to create dynamic client, %v", err) + return nil, fmt.Errorf("could not create dynamic client, %v", err) } poolScopedCacheSyncManager := &poolScopedCacheSyncManager{ @@ -283,13 +283,13 @@ func (coordinator *coordinator) Run() { case LeaderHub: poolCacheManager, etcdStorage, cancelEtcdStorage, err = coordinator.buildPoolCacheStore() if err != nil { - klog.Errorf("failed to create pool scoped cache store and manager, %v", err) + klog.Errorf("could not create pool scoped cache store and manager, %v", err) coordinator.statusInfoChan <- electorStatusInfo continue } if err := coordinator.poolCacheSyncManager.EnsureStart(); err != nil { - klog.Errorf("failed to sync pool-scoped resource, %v", err) + klog.Errorf("could not sync pool-scoped resource, %v", err) cancelEtcdStorage() coordinator.statusInfoChan <- electorStatusInfo continue @@ -320,7 +320,7 @@ func (coordinator *coordinator) Run() { if coordinator.needUploadLocalCache { if err := coordinator.uploadLocalCache(etcdStorage); err != nil { - klog.Errorf("failed to upload local cache when yurthub becomes leader, %v", err) + klog.Errorf("could not upload local cache when yurthub becomes leader, %v", err) } else { needUploadLocalCache = false } @@ -328,7 +328,7 @@ func (coordinator *coordinator) Run() { case FollowerHub: poolCacheManager, etcdStorage, cancelEtcdStorage, err = coordinator.buildPoolCacheStore() if err != nil { - klog.Errorf("failed to create pool scoped cache store and manager, %v", err) + klog.Errorf("could not create pool scoped cache store and manager, %v", err) coordinator.statusInfoChan <- electorStatusInfo continue } @@ -339,7 +339,7 @@ func (coordinator *coordinator) Run() { if coordinator.needUploadLocalCache { if err := coordinator.uploadLocalCache(etcdStorage); err != nil { - klog.Errorf("failed to upload local cache when yurthub becomes follower, %v", err) + klog.Errorf("could not upload local cache when yurthub becomes follower, %v", err) } else { needUploadLocalCache = false } @@ -400,7 +400,7 @@ func (coordinator *coordinator) buildPoolCacheStore() (cachemanager.CacheManager etcdStore, err := etcd.NewStorage(ctx, coordinator.etcdStorageCfg) if err != nil { cancel() - return nil, nil, nil, fmt.Errorf("failed to create etcd storage, %v", err) + return nil, nil, nil, fmt.Errorf("could not create etcd storage, %v", err) } poolCacheManager := cachemanager.NewCacheManager( cachemanager.NewStorageWrapper(etcdStore), @@ -418,9 +418,9 @@ func (coordinator *coordinator) getEtcdStore() storage.Store { func (coordinator 
*coordinator) newNodeLeaseProxyClient() (coordclientset.LeaseInterface, error) { healthyCloudServer, err := coordinator.cloudHealthChecker.PickHealthyServer() if err != nil { - return nil, fmt.Errorf("failed to get a healthy cloud APIServer, %v", err) + return nil, fmt.Errorf("could not get a healthy cloud APIServer, %v", err) } else if healthyCloudServer == nil { - return nil, fmt.Errorf("failed to get a healthy cloud APIServer, all server are unhealthy") + return nil, fmt.Errorf("could not get a healthy cloud APIServer, all servers are unhealthy") } restCfg := &rest.Config{ Host: healthyCloudServer.String(), @@ -433,7 +433,7 @@ func (coordinator *coordinator) newNodeLeaseProxyClient() (coordclientset.LeaseI } cloudClient, err := kubernetes.NewForConfig(restCfg) if err != nil { - return nil, fmt.Errorf("failed to create cloud client, %v", err) + return nil, fmt.Errorf("could not create cloud client, %v", err) } return cloudClient.CoordinationV1().Leases(corev1.NamespaceNodeLease), nil @@ -458,7 +458,7 @@ func (coordinator *coordinator) delegateNodeLease(cloudLeaseClient coordclientse cloudLease, err := cloudLeaseClient.Get(coordinator.ctx, newLease.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { if _, err := cloudLeaseClient.Create(coordinator.ctx, cloudLease, metav1.CreateOptions{}); err != nil { - klog.Errorf("failed to create lease %s at cloud, %v", newLease.Name, err) + klog.Errorf("could not create lease %s at cloud, %v", newLease.Name, err) continue } } @@ -466,7 +466,7 @@ func (coordinator *coordinator) delegateNodeLease(cloudLeaseClient coordclientse cloudLease.Annotations = newLease.Annotations cloudLease.Spec.RenewTime = newLease.Spec.RenewTime if updatedLease, err := cloudLeaseClient.Update(coordinator.ctx, cloudLease, metav1.UpdateOptions{}); err != nil { - klog.Errorf("failed to update lease %s at cloud, %v", newLease.Name, err) + klog.Errorf("could not update lease %s at cloud, %v", newLease.Name, err) continue } else { klog.V(2).Infof("delegate node lease for %s", updatedLease.Name) @@ -498,7 +498,7 @@ func (p *poolScopedCacheSyncManager) EnsureStart() error { if !p.isRunning { err := p.coordinatorClient.CoordinationV1().Leases(namespaceInformerLease).Delete(p.ctx, nameInformerLease, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { - return fmt.Errorf("failed to delete informer sync lease, %v", err) + return fmt.Errorf("could not delete informer sync lease, %v", err) } etcdStore := p.getEtcdStore() @@ -506,7 +506,7 @@ func (p *poolScopedCacheSyncManager) EnsureStart() error { return fmt.Errorf("got empty etcd storage") } if err := etcdStore.DeleteComponentResources(constants.DefaultPoolScopedUserAgent); err != nil { - return fmt.Errorf("failed to clean old pool-scoped cache, %v", err) + return fmt.Errorf("could not clean old pool-scoped cache, %v", err) } ctx, cancel := context.WithCancel(p.ctx) @@ -546,7 +546,7 @@ func (p *poolScopedCacheSyncManager) holdInformerSync(ctx context.Context, hasIn p.renewInformerLease(ctx, informerLease) return } - klog.Error("failed to wait for cache synced, it was canceled") + klog.Error("could not wait for cache to be synced, it was canceled") } func (p *poolScopedCacheSyncManager) renewInformerLease(ctx context.Context, lease informerLease) { @@ -559,7 +559,7 @@ func (p *poolScopedCacheSyncManager) renewInformerLease(ctx context.Context, lea case <-t.C: newLease, err := lease.Update(p.informerSyncedLease) if err != nil { - klog.Errorf("failed to update informer lease, %v", err) + klog.Errorf("could not update 
informer lease, %v", err) continue } p.informerSyncedLease = newLease @@ -613,12 +613,12 @@ func (l *localCacheUploader) Upload() { for k, b := range objBytes { rv, err := getRv(b) if err != nil { - klog.Errorf("failed to get name from bytes %s, %v", string(b), err) + klog.Errorf("could not get rv from bytes %s, %v", string(b), err) continue } if err := l.createOrUpdate(k, b, rv); err != nil { - klog.Errorf("failed to upload %s, %v", k.Key(), err) + klog.Errorf("could not upload %s, %v", k.Key(), err) } } } @@ -648,25 +648,25 @@ func (l *localCacheUploader) resourcesToUpload() map[storage.Key][]byte { } localKeys, err := l.diskStorage.ListResourceKeysOfComponent(info.Component, gvr) if err != nil { - klog.Errorf("failed to get object keys from disk for %s, %v", gvr.String(), err) + klog.Errorf("could not get object keys from disk for %s, %v", gvr.String(), err) continue } for _, k := range localKeys { buf, err := l.diskStorage.Get(k) if err != nil { - klog.Errorf("failed to read local cache of key %s, %v", k.Key(), err) + klog.Errorf("could not read local cache of key %s, %v", k.Key(), err) continue } buildInfo, err := disk.ExtractKeyBuildInfo(k) if err != nil { - klog.Errorf("failed to extract key build info from local cache of key %s, %v", k.Key(), err) + klog.Errorf("could not extract key build info from local cache of key %s, %v", k.Key(), err) continue } poolCacheKey, err := l.etcdStorage.KeyFunc(*buildInfo) if err != nil { - klog.Errorf("failed to generate pool cache key from local cache key %s, %v", k.Key(), err) + klog.Errorf("could not generate pool cache key from local cache key %s, %v", k.Key(), err) continue } objBytes[poolCacheKey] = buf @@ -749,12 +749,12 @@ func (p *poolCacheSyncedDetector) detectPoolCacheSynced(obj interface{}) { func getRv(objBytes []byte) (uint64, error) { obj := &unstructured.Unstructured{} if err := json.Unmarshal(objBytes, obj); err != nil { - return 0, fmt.Errorf("failed to unmarshal json: %v", err) + return 0, fmt.Errorf("could not unmarshal json: %v", err) } rv, err := strconv.ParseUint(obj.GetResourceVersion(), 10, 64) if err != nil { - return 0, fmt.Errorf("failed to parse rv %s of pod %s, %v", obj.GetName(), obj.GetResourceVersion(), err) + return 0, fmt.Errorf("could not parse rv %s of pod %s, %v", obj.GetResourceVersion(), obj.GetName(), err) } return rv, nil diff --git a/pkg/yurthub/yurtcoordinator/informer_lease.go b/pkg/yurthub/yurtcoordinator/informer_lease.go index bc42e6064f8..ea2de15de8a 100644 --- a/pkg/yurthub/yurtcoordinator/informer_lease.go +++ b/pkg/yurthub/yurtcoordinator/informer_lease.go @@ -171,7 +171,7 @@ func (nl *informerLeaseTmpl) newLease(base *coordinationv1.Lease) *coordinationv }, } } else { - klog.Errorf("failed to get node %q when trying to set owner ref to the node lease: %v", nl.leaseName, err) + klog.Errorf("could not get node %q when trying to set owner ref to the node lease: %v", nl.leaseName, err) } } return lease diff --git a/pkg/yurtiotdock/clients/edgex-foundry/v2/device_client.go b/pkg/yurtiotdock/clients/edgex-foundry/v2/device_client.go index 771d1659f43..eacf28f7bae 100644 --- a/pkg/yurtiotdock/clients/edgex-foundry/v2/device_client.go +++ b/pkg/yurtiotdock/clients/edgex-foundry/v2/device_client.go @@ -127,7 +127,7 @@ func (efc *EdgexDeviceClient) Update(ctx context.Context, device *iotv1alpha1.De if err != nil { return nil, err } else if rep.StatusCode() != http.StatusMultiStatus { - return nil, fmt.Errorf("failed to update device: %s, get response: %s", actualDeviceName, string(rep.Body())) + return 
nil, fmt.Errorf("could not update device: %s, get response: %s", actualDeviceName, string(rep.Body())) } return device, nil } @@ -257,12 +257,12 @@ func (efc *EdgexDeviceClient) UpdatePropertyState(ctx context.Context, propertyN if err != nil { return err } else if rep.StatusCode() != http.StatusOK { - return fmt.Errorf("failed to set property: %s, get response: %s", dps.Name, string(rep.Body())) + return fmt.Errorf("could not set property: %s, get response: %s", dps.Name, string(rep.Body())) } else if rep.Body() != nil { // If the parameters are illegal, such as out of range, the 200 status code is also returned, but the description appears in the body a := string(rep.Body()) if strings.Contains(a, "execWriteCmd") { - return fmt.Errorf("failed to set property: %s, get response: %s", dps.Name, string(rep.Body())) + return fmt.Errorf("could not set property: %s, get response: %s", dps.Name, string(rep.Body())) } } return nil @@ -310,7 +310,7 @@ func (efc *EdgexDeviceClient) ListPropertiesState(ctx context.Context, device *i } else { var eResp edgex_resp.EventResponse if err := json.Unmarshal(resp.Body(), &eResp); err != nil { - klog.V(5).ErrorS(err, "failed to decode the response ", "response", resp) + klog.V(5).ErrorS(err, "could not decode the response ", "response", resp) continue } event := eResp.Event diff --git a/pkg/yurtiotdock/clients/edgex-foundry/v2/deviceservice_client.go b/pkg/yurtiotdock/clients/edgex-foundry/v2/deviceservice_client.go index 48d74e223ce..36a646462a3 100644 --- a/pkg/yurtiotdock/clients/edgex-foundry/v2/deviceservice_client.go +++ b/pkg/yurtiotdock/clients/edgex-foundry/v2/deviceservice_client.go @@ -102,7 +102,7 @@ func (eds *EdgexDeviceServiceClient) Update(ctx context.Context, ds *v1alpha1.De } if ds.Status.EdgeId == "" { - return nil, fmt.Errorf("failed to update deviceservice %s with empty edgex id", ds.Name) + return nil, fmt.Errorf("could not update deviceservice %s with empty edgex id", ds.Name) } edgeDs := toEdgexDeviceService(ds) edgeDs.Id = ds.Status.EdgeId diff --git a/pkg/yurtiotdock/controllers/device_controller.go b/pkg/yurtiotdock/controllers/device_controller.go index 444759de006..a6132a9c2ef 100644 --- a/pkg/yurtiotdock/controllers/device_controller.go +++ b/pkg/yurtiotdock/controllers/device_controller.go @@ -176,14 +176,14 @@ func (r *DeviceReconciler) reconcileCreateDevice(ctx context.Context, d *iotv1al createdEdgeObj, err := r.deviceCli.Create(context.TODO(), d, clients.CreateOptions{}) if err != nil { util.SetDeviceCondition(deviceStatus, util.NewDeviceCondition(iotv1alpha1.DeviceSyncedCondition, corev1.ConditionFalse, iotv1alpha1.DeviceCreateSyncedReason, err.Error())) - return fmt.Errorf("fail to add Device to edge platform: %v", err) + return fmt.Errorf("could not add Device to edge platform: %v", err) } else { klog.V(4).Infof("Successfully add Device to edge platform, Name: %s, EdgeId: %s", edgeDeviceName, createdEdgeObj.Status.EdgeId) newDeviceStatus.EdgeId = createdEdgeObj.Status.EdgeId newDeviceStatus.Synced = true } } else { - klog.V(4).ErrorS(err, "failed to visit the edge platform") + klog.V(4).ErrorS(err, "could not visit the edge platform") util.SetDeviceCondition(deviceStatus, util.NewDeviceCondition(iotv1alpha1.DeviceSyncedCondition, corev1.ConditionFalse, iotv1alpha1.DeviceVistedCoreMetadataSyncedReason, "")) return nil } @@ -234,7 +234,7 @@ func (r *DeviceReconciler) reconcileUpdateDevice(ctx context.Context, d *iotv1al util.SetDeviceCondition(deviceStatus, util.NewDeviceCondition(iotv1alpha1.DeviceManagingCondition, 
corev1.ConditionFalse, iotv1alpha1.DeviceUpdateStateReason, err.Error())) return err } else if len(failedPropertyNames) != 0 { - err = fmt.Errorf("the following device properties failed to reconcile: %v", failedPropertyNames) + err = fmt.Errorf("the following device properties could not be reconciled: %v", failedPropertyNames) util.SetDeviceCondition(deviceStatus, util.NewDeviceCondition(iotv1alpha1.DeviceManagingCondition, corev1.ConditionFalse, err.Error(), "")) return nil } @@ -261,7 +261,7 @@ func (r *DeviceReconciler) reconcileDeviceProperties(d *iotv1alpha1.Device, devi actualProperty, err := r.deviceCli.GetPropertyState(context.TODO(), propertyName, d, clients.GetOptions{}) if err != nil { if !clients.IsNotFoundErr(err) { - klog.Errorf("DeviceName: %s, failed to get actual property value of %s, err:%v", d.GetName(), propertyName, err) + klog.Errorf("DeviceName: %s, could not get actual property value of %s, err:%v", d.GetName(), propertyName, err) failedPropertyNames = append(failedPropertyNames, propertyName) continue } @@ -282,7 +282,7 @@ func (r *DeviceReconciler) reconcileDeviceProperties(d *iotv1alpha1.Device, devi klog.V(4).Infof("DeviceName: %s, the desired value and the actual value are different, desired: %s, actual: %s", d.GetName(), desiredProperty.DesiredValue, actualProperty.ActualValue) if err := r.deviceCli.UpdatePropertyState(context.TODO(), propertyName, d, clients.UpdateOptions{}); err != nil { - klog.ErrorS(err, "failed to update property", "DeviceName", d.GetName(), "propertyName", propertyName) + klog.ErrorS(err, "could not update property", "DeviceName", d.GetName(), "propertyName", propertyName) failedPropertyNames = append(failedPropertyNames, propertyName) continue } diff --git a/pkg/yurtiotdock/controllers/device_syncer.go b/pkg/yurtiotdock/controllers/device_syncer.go index 2e695aa6552..d6766a6cef2 100644 --- a/pkg/yurtiotdock/controllers/device_syncer.go +++ b/pkg/yurtiotdock/controllers/device_syncer.go @@ -79,7 +79,7 @@ func (ds *DeviceSyncer) Run(stop <-chan struct{}) { // 1. get device on edge platform and OpenYurt edgeDevices, kubeDevices, err := ds.getAllDevices() if err != nil { - klog.V(3).ErrorS(err, "fail to list the devices") + klog.V(3).ErrorS(err, "could not list the devices") continue } @@ -92,17 +92,17 @@ func (ds *DeviceSyncer) Run(stop <-chan struct{}) { // 3. create device on OpenYurt which are exists in edge platform but not in OpenYurt if err := ds.syncEdgeToKube(redundantEdgeDevices); err != nil { - klog.V(3).ErrorS(err, "fail to create devices on OpenYurt") + klog.V(3).ErrorS(err, "could not create devices on OpenYurt") } // 4. delete redundant device on OpenYurt if err := ds.deleteDevices(redundantKubeDevices); err != nil { - klog.V(3).ErrorS(err, "fail to delete redundant devices on OpenYurt") + klog.V(3).ErrorS(err, "could not delete redundant devices on OpenYurt") } // 5. update device status on OpenYurt if err := ds.updateDevices(syncedDevices); err != nil { - klog.V(3).ErrorS(err, "fail to update devices status") + klog.V(3).ErrorS(err, "could not update devices status") } klog.V(2).Info("[Device] One round of synchronization is complete") } @@ -121,14 +121,14 @@ func (ds *DeviceSyncer) getAllDevices() (map[string]iotv1alpha1.Device, map[stri // 1.
list devices on edge platform eDevs, err := ds.deviceCli.List(context.TODO(), edgeCli.ListOptions{Namespace: ds.Namespace}) if err != nil { - klog.V(4).ErrorS(err, "fail to list the devices object on the Edge Platform") + klog.V(4).ErrorS(err, "could not list the devices object on the Edge Platform") return edgeDevice, kubeDevice, err } // 2. list devices on OpenYurt (filter objects belonging to edgeServer) var kDevs iotv1alpha1.DeviceList listOptions := client.MatchingFields{util.IndexerPathForNodepool: ds.NodePool} if err = ds.List(context.TODO(), &kDevs, listOptions, client.InNamespace(ds.Namespace)); err != nil { - klog.V(4).ErrorS(err, "fail to list the devices object on the OpenYurt") + klog.V(4).ErrorS(err, "could not list the devices object on the OpenYurt") return edgeDevice, kubeDevice, err } for i := range eDevs { @@ -185,7 +185,7 @@ func (ds *DeviceSyncer) syncEdgeToKube(edgeDevs map[string]*iotv1alpha1.Device) if apierrors.IsAlreadyExists(err) { continue } - klog.V(5).ErrorS(err, "fail to create device on OpenYurt", "DeviceName", strings.ToLower(ed.Name)) + klog.V(5).ErrorS(err, "could not create device on OpenYurt", "DeviceName", strings.ToLower(ed.Name)) return err } } @@ -196,7 +196,7 @@ func (ds *DeviceSyncer) syncEdgeToKube(edgeDevs map[string]*iotv1alpha1.Device) func (ds *DeviceSyncer) deleteDevices(redundantKubeDevices map[string]*iotv1alpha1.Device) error { for _, kd := range redundantKubeDevices { if err := ds.Client.Delete(context.TODO(), kd); err != nil { - klog.V(5).ErrorS(err, "fail to delete the device on OpenYurt", + klog.V(5).ErrorS(err, "could not delete the device on OpenYurt", "DeviceName", kd.Name) return err } @@ -206,7 +206,7 @@ func (ds *DeviceSyncer) deleteDevices(redundantKubeDevices map[string]*iotv1alph }, }) if err := ds.Client.Patch(context.TODO(), kd, client.RawPatch(types.MergePatchType, patchData)); err != nil { - klog.V(5).ErrorS(err, "fail to remove finalizer of Device on Kubernetes", "Device", kd.Name) + klog.V(5).ErrorS(err, "could not remove finalizer of Device on Kubernetes", "Device", kd.Name) return err } } diff --git a/pkg/yurtiotdock/controllers/deviceprofile_controller.go b/pkg/yurtiotdock/controllers/deviceprofile_controller.go index 3ea4f321918..1e4315b2584 100644 --- a/pkg/yurtiotdock/controllers/deviceprofile_controller.go +++ b/pkg/yurtiotdock/controllers/deviceprofile_controller.go @@ -146,7 +146,7 @@ func (r *DeviceProfileReconciler) reconcileCreateDeviceProfile(ctx context.Conte klog.V(4).Infof("Checking if deviceProfile already exist on the edge platform: %s", dp.GetName()) if edgeDp, err := r.edgeClient.Get(context.TODO(), actualName, clients.GetOptions{Namespace: r.Namespace}); err != nil { if !clients.IsNotFoundErr(err) { - klog.V(4).ErrorS(err, "fail to visit the edge platform") + klog.V(4).ErrorS(err, "could not visit the edge platform") return nil } } else { diff --git a/pkg/yurtiotdock/controllers/deviceprofile_syncer.go b/pkg/yurtiotdock/controllers/deviceprofile_syncer.go index 4a6fe2f0757..726aecfd052 100644 --- a/pkg/yurtiotdock/controllers/deviceprofile_syncer.go +++ b/pkg/yurtiotdock/controllers/deviceprofile_syncer.go @@ -79,7 +79,7 @@ func (dps *DeviceProfileSyncer) Run(stop <-chan struct{}) { // 1. 
get deviceProfiles on edge platform and OpenYurt edgeDeviceProfiles, kubeDeviceProfiles, err := dps.getAllDeviceProfiles() if err != nil { - klog.V(3).ErrorS(err, "fail to list the deviceProfiles") + klog.V(3).ErrorS(err, "could not list the deviceProfiles") continue } @@ -93,12 +93,12 @@ func (dps *DeviceProfileSyncer) Run(stop <-chan struct{}) { // 3. create deviceProfiles on OpenYurt which are exists in edge platform but not in OpenYurt if err := dps.syncEdgeToKube(redundantEdgeDeviceProfiles); err != nil { - klog.V(3).ErrorS(err, "fail to create deviceProfiles on OpenYurt") + klog.V(3).ErrorS(err, "could not create deviceProfiles on OpenYurt") } // 4. delete redundant deviceProfiles on OpenYurt if err := dps.deleteDeviceProfiles(redundantKubeDeviceProfiles); err != nil { - klog.V(3).ErrorS(err, "fail to delete redundant deviceProfiles on OpenYurt") + klog.V(3).ErrorS(err, "could not delete redundant deviceProfiles on OpenYurt") } // 5. update deviceProfiles on OpenYurt @@ -122,14 +122,14 @@ func (dps *DeviceProfileSyncer) getAllDeviceProfiles() ( // 1. list deviceProfiles on edge platform eDps, err := dps.edgeClient.List(context.TODO(), devcli.ListOptions{Namespace: dps.Namespace}) if err != nil { - klog.V(4).ErrorS(err, "fail to list the deviceProfiles on the edge platform") + klog.V(4).ErrorS(err, "could not list the deviceProfiles on the edge platform") return edgeDeviceProfiles, kubeDeviceProfiles, err } // 2. list deviceProfiles on OpenYurt (filter objects belonging to edgeServer) var kDps iotv1alpha1.DeviceProfileList listOptions := client.MatchingFields{util.IndexerPathForNodepool: dps.NodePool} if err = dps.List(context.TODO(), &kDps, listOptions, client.InNamespace(dps.Namespace)); err != nil { - klog.V(4).ErrorS(err, "fail to list the deviceProfiles on the Kubernetes") + klog.V(4).ErrorS(err, "could not list the deviceProfiles on the Kubernetes") return edgeDeviceProfiles, kubeDeviceProfiles, err } for i := range eDps { @@ -211,7 +211,7 @@ func (dps *DeviceProfileSyncer) syncEdgeToKube(edgeDps map[string]*iotv1alpha1.D func (dps *DeviceProfileSyncer) deleteDeviceProfiles(redundantKubeDeviceProfiles map[string]*iotv1alpha1.DeviceProfile) error { for _, kdp := range redundantKubeDeviceProfiles { if err := dps.Client.Delete(context.TODO(), kdp); err != nil { - klog.V(5).ErrorS(err, "fail to delete the DeviceProfile on Kubernetes: %s ", + klog.V(5).ErrorS(err, "could not delete the DeviceProfile on Kubernetes: %s ", "DeviceProfile", kdp.Name) return err } @@ -221,7 +221,7 @@ func (dps *DeviceProfileSyncer) deleteDeviceProfiles(redundantKubeDeviceProfiles }, }) if err := dps.Client.Patch(context.TODO(), kdp, client.RawPatch(types.MergePatchType, patchData)); err != nil { - klog.V(5).ErrorS(err, "fail to remove finalizer of DeviceProfile on Kubernetes", "DeviceProfile", kdp.Name) + klog.V(5).ErrorS(err, "could not remove finalizer of DeviceProfile on Kubernetes", "DeviceProfile", kdp.Name) return err } } diff --git a/pkg/yurtiotdock/controllers/deviceservice_controller.go b/pkg/yurtiotdock/controllers/deviceservice_controller.go index c277220f564..76015028fc6 100644 --- a/pkg/yurtiotdock/controllers/deviceservice_controller.go +++ b/pkg/yurtiotdock/controllers/deviceservice_controller.go @@ -170,14 +170,14 @@ func (r *DeviceServiceReconciler) reconcileCreateDeviceService(ctx context.Conte // Checking if deviceService already exist on the edge platform if edgeDs, err := r.deviceServiceCli.Get(context.TODO(), edgeDeviceServiceName, clients.GetOptions{Namespace: r.Namespace}); err != 
nil { if !clients.IsNotFoundErr(err) { - klog.V(4).ErrorS(err, "fail to visit the edge platform") + klog.V(4).ErrorS(err, "could not visit the edge platform") return nil } else { createdDs, err := r.deviceServiceCli.Create(context.TODO(), ds, clients.CreateOptions{}) if err != nil { klog.V(4).ErrorS(err, "failed to create deviceService on edge platform") util.SetDeviceServiceCondition(deviceServiceStatus, util.NewDeviceServiceCondition(iotv1alpha1.DeviceServiceSyncedCondition, corev1.ConditionFalse, iotv1alpha1.DeviceServiceCreateSyncedReason, err.Error())) - return fmt.Errorf("fail to create DeviceService to edge platform: %v", err) + return fmt.Errorf("could not create DeviceService to edge platform: %v", err) } klog.V(4).Infof("Successfully add DeviceService to Edge Platform, Name: %s, EdgeId: %s", ds.GetName(), createdDs.Status.EdgeId) diff --git a/pkg/yurtiotdock/controllers/deviceservice_syncer.go b/pkg/yurtiotdock/controllers/deviceservice_syncer.go index e048cd7449d..bc928e73335 100644 --- a/pkg/yurtiotdock/controllers/deviceservice_syncer.go +++ b/pkg/yurtiotdock/controllers/deviceservice_syncer.go @@ -74,7 +74,7 @@ func (ds *DeviceServiceSyncer) Run(stop <-chan struct{}) { // 1. get deviceServices on edge platform and OpenYurt edgeDeviceServices, kubeDeviceServices, err := ds.getAllDeviceServices() if err != nil { - klog.V(3).ErrorS(err, "fail to list the deviceServices") + klog.V(3).ErrorS(err, "could not list the deviceServices") continue } @@ -88,17 +88,17 @@ func (ds *DeviceServiceSyncer) Run(stop <-chan struct{}) { // 3. create deviceServices on OpenYurt which are exists in edge platform but not in OpenYurt if err := ds.syncEdgeToKube(redundantEdgeDeviceServices); err != nil { - klog.V(3).ErrorS(err, "fail to create deviceServices on OpenYurt") + klog.V(3).ErrorS(err, "could not create deviceServices on OpenYurt") } // 4. delete redundant deviceServices on OpenYurt if err := ds.deleteDeviceServices(redundantKubeDeviceServices); err != nil { - klog.V(3).ErrorS(err, "fail to delete redundant deviceServices on OpenYurt") + klog.V(3).ErrorS(err, "could not delete redundant deviceServices on OpenYurt") } // 5. update deviceService status on OpenYurt if err := ds.updateDeviceServices(syncedDeviceServices); err != nil { - klog.V(3).ErrorS(err, "fail to update deviceServices") + klog.V(3).ErrorS(err, "could not update deviceServices") } klog.V(2).Info("[DeviceService] One round of synchronization is complete") } @@ -120,14 +120,14 @@ func (ds *DeviceServiceSyncer) getAllDeviceServices() ( // 1. list deviceServices on edge platform eDevSs, err := ds.deviceServiceCli.List(context.TODO(), iotcli.ListOptions{Namespace: ds.Namespace}) if err != nil { - klog.V(4).ErrorS(err, "fail to list the deviceServices object on the edge platform") + klog.V(4).ErrorS(err, "could not list the deviceServices object on the edge platform") return edgeDeviceServices, kubeDeviceServices, err } // 2. 
list deviceServices on OpenYurt (filter objects belonging to edgeServer) var kDevSs iotv1alpha1.DeviceServiceList listOptions := client.MatchingFields{util.IndexerPathForNodepool: ds.NodePool} if err = ds.List(context.TODO(), &kDevSs, listOptions, client.InNamespace(ds.Namespace)); err != nil { - klog.V(4).ErrorS(err, "fail to list the deviceServices object on the Kubernetes") + klog.V(4).ErrorS(err, "could not list the deviceServices object on the Kubernetes") return edgeDeviceServices, kubeDeviceServices, err } for i := range eDevSs { @@ -195,7 +195,7 @@ func (ds *DeviceServiceSyncer) syncEdgeToKube(edgeDevs map[string]*iotv1alpha1.D func (ds *DeviceServiceSyncer) deleteDeviceServices(redundantKubeDeviceServices map[string]*iotv1alpha1.DeviceService) error { for _, kds := range redundantKubeDeviceServices { if err := ds.Client.Delete(context.TODO(), kds); err != nil { - klog.V(5).ErrorS(err, "fail to delete the DeviceService on Kubernetes", + klog.V(5).ErrorS(err, "could not delete the DeviceService on Kubernetes", "DeviceService", kds.Name) return err } @@ -205,7 +205,7 @@ func (ds *DeviceServiceSyncer) deleteDeviceServices(redundantKubeDeviceServices }, }) if err := ds.Client.Patch(context.TODO(), kds, client.RawPatch(types.MergePatchType, patchData)); err != nil { - klog.V(5).ErrorS(err, "fail to remove finalizer of DeviceService on Kubernetes", "DeviceService", kds.Name) + klog.V(5).ErrorS(err, "could not remove finalizer of DeviceService on Kubernetes", "DeviceService", kds.Name) return err } } @@ -223,7 +223,7 @@ func (ds *DeviceServiceSyncer) updateDeviceServices(syncedDeviceServices map[str klog.V(5).InfoS("update Conflicts", "DeviceService", sd.Name) continue } - klog.V(5).ErrorS(err, "fail to update the DeviceService on Kubernetes", + klog.V(5).ErrorS(err, "could not update the DeviceService on Kubernetes", "DeviceService", sd.Name) return err } diff --git a/pkg/yurtiotdock/controllers/predicate.go b/pkg/yurtiotdock/controllers/predicate.go index 0c6dc5eadb5..8d6ff13cd8e 100644 --- a/pkg/yurtiotdock/controllers/predicate.go +++ b/pkg/yurtiotdock/controllers/predicate.go @@ -31,12 +31,12 @@ func genFirstUpdateFilter(objKind string) predicate.Predicate { UpdateFunc: func(e event.UpdateEvent) bool { oldDp, ok := e.ObjectOld.(edgexCli.EdgeXObject) if !ok { - klog.Infof("fail to assert object to deviceprofile, object kind is %s", objKind) + klog.Infof("could not assert object to deviceprofile, object kind is %s", objKind) return false } newDp, ok := e.ObjectNew.(edgexCli.EdgeXObject) if !ok { - klog.Infof("fail to assert object to deviceprofile, object kind is %s", objKind) + klog.Infof("could not assert object to deviceprofile, object kind is %s", objKind) return false } if !oldDp.IsAddedToEdgeX() && newDp.IsAddedToEdgeX() { diff --git a/pkg/yurtmanager/controller/csrapprover/csrapprover_controller.go b/pkg/yurtmanager/controller/csrapprover/csrapprover_controller.go index 7e9f3d28419..990e10d66e1 100644 --- a/pkg/yurtmanager/controller/csrapprover/csrapprover_controller.go +++ b/pkg/yurtmanager/controller/csrapprover/csrapprover_controller.go @@ -148,7 +148,7 @@ func (r *ReconcileCsrApprover) InjectMapper(mapper meta.RESTMapper) error { func (r *ReconcileCsrApprover) InjectConfig(cfg *rest.Config) error { client, err := kubernetes.NewForConfig(cfg) if err != nil { - klog.Errorf("failed to create kube client, %v", err) + klog.Errorf("could not create kube client, %v", err) return err } r.csrApproverClient = client @@ -211,7 +211,7 @@ func (r *ReconcileCsrApprover) Reconcile(ctx 
context.Context, request reconcile. // Update CertificateSigningRequests err = r.updateApproval(ctx, v1Instance) if err != nil { - klog.Errorf("failed to approve %s(%s), %v", yurtCsr, v1Instance.GetName(), err) + klog.Errorf("could not approve %s(%s), %v", yurtCsr, v1Instance.GetName(), err) return reconcile.Result{}, err } klog.Infof("successfully approve %s(%s)", yurtCsr, v1Instance.GetName()) diff --git a/pkg/yurtmanager/controller/daemonpodupdater/daemon_pod_updater_controller.go b/pkg/yurtmanager/controller/daemonpodupdater/daemon_pod_updater_controller.go index 014ecbd6b96..1cf4fb64b55 100644 --- a/pkg/yurtmanager/controller/daemonpodupdater/daemon_pod_updater_controller.go +++ b/pkg/yurtmanager/controller/daemonpodupdater/daemon_pod_updater_controller.go @@ -127,7 +127,7 @@ func newReconciler(_ *appconfig.CompletedConfig, mgr manager.Manager) reconcile. func (r *ReconcileDaemonpodupdater) InjectConfig(cfg *rest.Config) error { c, err := kubernetes.NewForConfig(cfg) if err != nil { - klog.Errorf("failed to create kube client, %v", err) + klog.Errorf("could not create kube client, %v", err) return err } // Use PodControlInterface to delete pods, which is convenient for testing @@ -215,7 +215,7 @@ func (r *ReconcileDaemonpodupdater) Reconcile(_ context.Context, request reconci // Fetch the DaemonSet instance instance := &appsv1.DaemonSet{} if err := r.Get(context.TODO(), request.NamespacedName, instance); err != nil { - klog.Errorf("Fail to get DaemonSet %v, %v", request.NamespacedName, err) + klog.Errorf("could not get DaemonSet %v, %v", request.NamespacedName, err) if apierrors.IsNotFound(err) { r.expectations.DeleteExpectations(request.NamespacedName.String()) } @@ -243,13 +243,13 @@ func (r *ReconcileDaemonpodupdater) Reconcile(_ context.Context, request reconci switch v { case OTAUpdate: if err := r.otaUpdate(instance); err != nil { - klog.Errorf(Format("Fail to OTA update DaemonSet %v pod: %v", request.NamespacedName, err)) + klog.Errorf(Format("could not OTA update DaemonSet %v pod: %v", request.NamespacedName, err)) return reconcile.Result{}, err } case AutoUpdate, AdvancedRollingUpdate: if err := r.advancedRollingUpdate(instance); err != nil { - klog.Errorf(Format("Fail to advanced rolling update DaemonSet %v pod: %v", request.NamespacedName, err)) + klog.Errorf(Format("could not advanced rolling update DaemonSet %v pod: %v", request.NamespacedName, err)) return reconcile.Result{}, err } default: @@ -263,7 +263,7 @@ func (r *ReconcileDaemonpodupdater) Reconcile(_ context.Context, request reconci func (r *ReconcileDaemonpodupdater) deletePod(evt event.DeleteEvent, _ workqueue.RateLimitingInterface) { pod, ok := evt.Object.(*corev1.Pod) if !ok { - utilruntime.HandleError(fmt.Errorf("deletepod fail to deal with object that is not a pod %#v", evt.Object)) + utilruntime.HandleError(fmt.Errorf("deletepod could not deal with object that is not a pod %#v", evt.Object)) return } @@ -406,7 +406,7 @@ func (r *ReconcileDaemonpodupdater) getNodesToDaemonPods(ds *appsv1.DaemonSet) ( for _, pod := range pods { nodeName, err := GetTargetNodeName(pod) if err != nil { - klog.Warningf("Failed to get target node name of Pod %v/%v in DaemonSet %v/%v", + klog.Warningf("could not get target node name of Pod %v/%v in DaemonSet %v/%v", pod.Namespace, pod.Name, ds.Namespace, ds.Name) continue } diff --git a/pkg/yurtmanager/controller/internal/controller/controller.go b/pkg/yurtmanager/controller/internal/controller/controller.go index 702c3b16ff7..af6a2302cf7 100644 --- 
a/pkg/yurtmanager/controller/internal/controller/controller.go +++ b/pkg/yurtmanager/controller/internal/controller/controller.go @@ -167,7 +167,7 @@ func (c *Controller) Start(ctx context.Context) error { // WaitForSync waits for a definitive timeout, and returns if there // is an error or a timeout if err := syncingSource.WaitForSync(sourceStartCtx); err != nil { - err := fmt.Errorf("failed to wait for %s caches to sync: %w", c.Name, err) + err := fmt.Errorf("could not wait for %s caches to sync: %w", c.Name, err) klog.ErrorS(err, "Could not wait for Cache to sync") return err } @@ -214,7 +214,7 @@ func (c *Controller) WaitForStarted(ctx context.Context) bool { return true, nil }, ctx.Done()) if err != nil { - klog.V(2).InfoS("failed to start %s controller , %v", c.Name, err) + klog.V(2).InfoS("could not start %s controller , %v", c.Name, err) return false } diff --git a/pkg/yurtmanager/controller/nodelifecycle/node_lifecycle_controller.go b/pkg/yurtmanager/controller/nodelifecycle/node_lifecycle_controller.go index 0879ca83e99..97031a1d718 100644 --- a/pkg/yurtmanager/controller/nodelifecycle/node_lifecycle_controller.go +++ b/pkg/yurtmanager/controller/nodelifecycle/node_lifecycle_controller.go @@ -446,7 +446,7 @@ func newReconciler(cfg *appconfig.CompletedConfig, mgr manager.Manager) (*Reconc nc.computeZoneStateFunc = nc.ComputeZoneState kubeClient, err := clientset.NewForConfig(mgr.GetConfig()) if err != nil { - klog.Errorf("failed to create kube client, %v", err) + klog.Errorf("could not create kube client, %v", err) return nil, err } nc.kubeClient = kubeClient @@ -507,13 +507,13 @@ func (nc *ReconcileNodeLifeCycle) doNodeProcessingPassWorker(ctx context.Context } nodeName := obj.(string) if err := nc.doNoScheduleTaintingPass(ctx, nodeName); err != nil { - klog.ErrorS(err, "Failed to taint NoSchedule on node, requeue it", "node", klog.KRef("", nodeName)) + klog.ErrorS(err, "could not taint NoSchedule on node, requeue it", "node", klog.KRef("", nodeName)) // TODO(k82cn): Add nodeName back to the queue } // TODO: re-evaluate whether there are any labels that need to be // reconcile in 1.19. Remove this function if it's no longer necessary. 
if err := nc.reconcileNodeLabels(ctx, nodeName); err != nil { - klog.ErrorS(err, "Failed to reconcile labels for node, requeue it", "node", klog.KRef("", nodeName)) + klog.ErrorS(err, "could not reconcile labels for node, requeue it", "node", klog.KRef("", nodeName)) // TODO(yujuhong): Add nodeName back to the queue } nc.nodeUpdateQueue.Done(nodeName) @@ -572,7 +572,7 @@ func (nc *ReconcileNodeLifeCycle) doNoScheduleTaintingPass(ctx context.Context, return nil } if !controllerutil.SwapNodeControllerTaint(ctx, nc.kubeClient, taintsToAdd, taintsToDel, node) { - return fmt.Errorf("failed to swap taints of node %+v", node) + return fmt.Errorf("could not swap taints of node %+v", node) } return nil } @@ -611,7 +611,7 @@ func (nc *ReconcileNodeLifeCycle) doNoExecuteTaintingPass(ctx context.Context) { klog.InfoS("Node no longer present in nodeLister", "node", klog.KRef("", value.Value)) return true, 0 } else if err != nil { - klog.InfoS("Failed to get Node from the nodeLister", "node", klog.KRef("", value.Value), "err", err) + klog.InfoS("could not get Node from the nodeLister", "node", klog.KRef("", value.Value), "err", err) // retry in 50 millisecond return false, 50 * time.Millisecond } @@ -780,7 +780,7 @@ func (nc *ReconcileNodeLifeCycle) processTaintBaseEviction(ctx context.Context, if taintutils.TaintExists(node.Spec.Taints, UnreachableTaintTemplate) { taintToAdd := *NotReadyTaintTemplate if !controllerutil.SwapNodeControllerTaint(ctx, nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{UnreachableTaintTemplate}, node) { - klog.ErrorS(nil, "Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle") + klog.ErrorS(nil, "could not instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle") } } else if nc.markNodeForTainting(node, v1.ConditionFalse) { klog.V(2).InfoS("Node is NotReady. Adding it to the Taint queue", "node", klog.KObj(node), "timeStamp", decisionTimestamp) @@ -790,7 +790,7 @@ func (nc *ReconcileNodeLifeCycle) processTaintBaseEviction(ctx context.Context, if taintutils.TaintExists(node.Spec.Taints, NotReadyTaintTemplate) { taintToAdd := *UnreachableTaintTemplate if !controllerutil.SwapNodeControllerTaint(ctx, nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{NotReadyTaintTemplate}, node) { - klog.ErrorS(nil, "Failed to instantly swap NotReadyTaint to UnreachableTaint. Will try again in the next cycle") + klog.ErrorS(nil, "could not instantly swap NotReadyTaint to UnreachableTaint. Will try again in the next cycle") } } else if nc.markNodeForTainting(node, v1.ConditionUnknown) { klog.V(2).InfoS("Node is unresponsive. Adding it to the Taint queue", "node", klog.KObj(node), "timeStamp", decisionTimestamp) @@ -798,7 +798,7 @@ func (nc *ReconcileNodeLifeCycle) processTaintBaseEviction(ctx context.Context, case v1.ConditionTrue: removed, err := nc.markNodeAsReachable(ctx, node) if err != nil { - klog.ErrorS(nil, "Failed to remove taints from node. Will retry in next iteration", "node", klog.KObj(node)) + klog.ErrorS(nil, "could not remove taints from node. 
Will retry in next iteration", "node", klog.KObj(node)) } if removed { klog.V(2).InfoS("Node is healthy again, removing all taints", "node", klog.KObj(node)) @@ -1033,7 +1033,7 @@ func (nc *ReconcileNodeLifeCycle) handleDisruption(ctx context.Context, zoneToNo for i := range nodes { _, err := nc.markNodeAsReachable(ctx, nodes[i]) if err != nil { - klog.ErrorS(nil, "Failed to remove taints from Node", "node", klog.KObj(nodes[i])) + klog.ErrorS(nil, "could not remove taints from Node", "node", klog.KObj(nodes[i])) } } // We stop all evictions. @@ -1116,7 +1116,7 @@ func (nc *ReconcileNodeLifeCycle) processPod(ctx context.Context, podItem podUpd // If the pod was deleted, there is no need to requeue. return } - klog.InfoS("Failed to read pod", "pod", klog.KRef(podItem.namespace, podItem.name), "err", err) + klog.InfoS("could not read pod", "pod", klog.KRef(podItem.namespace, podItem.name), "err", err) nc.podUpdateQueue.AddRateLimited(podItem) return } @@ -1133,7 +1133,7 @@ func (nc *ReconcileNodeLifeCycle) processPod(ctx context.Context, podItem podUpd //_, err = nc.nodeLister.Get(nodeName) err = nc.controllerRuntimeClient.Get(ctx, types.NamespacedName{Name: nodeName}, node) if err != nil { - klog.InfoS("Failed to read node", "node", klog.KRef("", nodeName), "err", err) + klog.InfoS("could not read node", "node", klog.KRef("", nodeName), "err", err) nc.podUpdateQueue.AddRateLimited(podItem) return } @@ -1259,12 +1259,12 @@ func (nc *ReconcileNodeLifeCycle) markNodeForTainting(node *v1.Node, status v1.C func (nc *ReconcileNodeLifeCycle) markNodeAsReachable(ctx context.Context, node *v1.Node) (bool, error) { err := controllerutil.RemoveTaintOffNode(ctx, nc.kubeClient, node.Name, node, UnreachableTaintTemplate) if err != nil { - klog.ErrorS(err, "Failed to remove taint from node", "node", klog.KObj(node)) + klog.ErrorS(err, "could not remove taint from node", "node", klog.KObj(node)) return false, err } err = controllerutil.RemoveTaintOffNode(ctx, nc.kubeClient, node.Name, node, NotReadyTaintTemplate) if err != nil { - klog.ErrorS(err, "Failed to remove taint from node", "node", klog.KObj(node)) + klog.ErrorS(err, "could not remove taint from node", "node", klog.KObj(node)) return false, err } nc.evictorLock.Lock() diff --git a/pkg/yurtmanager/controller/nodelifecycle/scheduler/taint_manager.go b/pkg/yurtmanager/controller/nodelifecycle/scheduler/taint_manager.go index 108de7c23cf..6a683d6c3ab 100644 --- a/pkg/yurtmanager/controller/nodelifecycle/scheduler/taint_manager.go +++ b/pkg/yurtmanager/controller/nodelifecycle/scheduler/taint_manager.go @@ -443,7 +443,7 @@ func (tc *NoExecuteTaintManager) handleNodeUpdate(ctx context.Context, nodeUpdat // tc.PodUpdated which will use tc.taintedNodes to potentially delete delayed pods. 
pods, err := tc.getPodsAssignedToNode(node.Name) if err != nil { - klog.Errorf("Failed to get pods assigned to node(%s), %v", node.Name, err) + klog.Errorf("could not get pods assigned to node(%s), %v", node.Name, err) return } if len(pods) == 0 { diff --git a/pkg/yurtmanager/controller/nodepool/nodepool_enqueue_handlers.go b/pkg/yurtmanager/controller/nodepool/nodepool_enqueue_handlers.go index 6fe91665d6b..e84f89299f9 100644 --- a/pkg/yurtmanager/controller/nodepool/nodepool_enqueue_handlers.go +++ b/pkg/yurtmanager/controller/nodepool/nodepool_enqueue_handlers.go @@ -41,7 +41,7 @@ func (e *EnqueueNodePoolForNode) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { node, ok := evt.Object.(*corev1.Node) if !ok { - klog.Error(Format("fail to assert runtime Object to v1.Node")) + klog.Error(Format("could not assert runtime Object to v1.Node")) return } klog.V(5).Infof(Format("will enqueue nodepool as node(%s) has been created", @@ -58,13 +58,13 @@ func (e *EnqueueNodePoolForNode) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { newNode, ok := evt.ObjectNew.(*corev1.Node) if !ok { - klog.Errorf(Format("Fail to assert runtime Object(%s) to v1.Node", + klog.Errorf(Format("could not assert runtime Object(%s) to v1.Node", evt.ObjectNew.GetName())) return } oldNode, ok := evt.ObjectOld.(*corev1.Node) if !ok { - klog.Errorf(Format("fail to assert runtime Object(%s) to v1.Node", + klog.Errorf(Format("could not assert runtime Object(%s) to v1.Node", evt.ObjectOld.GetName())) return } @@ -113,7 +113,7 @@ func (e *EnqueueNodePoolForNode) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { node, ok := evt.Object.(*corev1.Node) if !ok { - klog.Error(Format("Fail to assert runtime Object to v1.Node")) + klog.Error(Format("could not assert runtime Object to v1.Node")) return } diff --git a/pkg/yurtmanager/controller/platformadmin/config/config.go b/pkg/yurtmanager/controller/platformadmin/config/config.go index fa0cf36fbec..bc067613b48 100644 --- a/pkg/yurtmanager/controller/platformadmin/config/config.go +++ b/pkg/yurtmanager/controller/platformadmin/config/config.go @@ -116,12 +116,12 @@ func NewPlatformAdminControllerConfiguration() *PlatformAdminControllerConfigura } securityContent, err := EdgeXFS.ReadFile(securityFile) if err != nil { - klog.Errorf("Fail to open the embed EdgeX security config: %v", err) + klog.Errorf("could not open the embed EdgeX security config: %v", err) return nil } nosectyContent, err := EdgeXFS.ReadFile(nosectyFile) if err != nil { - klog.Errorf("Fail to open the embed EdgeX nosecty config: %v", err) + klog.Errorf("could not open the embed EdgeX nosecty config: %v", err) return nil } @@ -131,7 +131,7 @@ func NewPlatformAdminControllerConfiguration() *PlatformAdminControllerConfigura return nil } if err = json.Unmarshal(securityContent, &edgexconfig); err != nil { - klog.Errorf("Fail to unmarshal the embed EdgeX security config: %v", err) + klog.Errorf("could not unmarshal the embed EdgeX security config: %v", err) return nil } for _, version := range edgexconfig.Versions { @@ -140,7 +140,7 @@ func NewPlatformAdminControllerConfiguration() *PlatformAdminControllerConfigura } if err := json.Unmarshal(nosectyContent, &edgexnosectyconfig); err != nil { - klog.Errorf("Fail to unmarshal the embed EdgeX nosecty config: %v", err) + klog.Errorf("could not unmarshal the embed EdgeX nosecty config: %v", err) return nil } for _, version := range edgexnosectyconfig.Versions { diff --git 
a/pkg/yurtmanager/controller/platformadmin/platformadmin_controller.go b/pkg/yurtmanager/controller/platformadmin/platformadmin_controller.go index 1004794bcc0..6cf7645060b 100644 --- a/pkg/yurtmanager/controller/platformadmin/platformadmin_controller.go +++ b/pkg/yurtmanager/controller/platformadmin/platformadmin_controller.go @@ -182,7 +182,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { klog.V(4).Infof(Format("registering the field indexers of platformadmin controller")) if err := util.RegisterFieldIndexers(mgr.GetFieldIndexer()); err != nil { - klog.Errorf(Format("failed to register field indexers for platformadmin controller, %v", err)) + klog.Errorf(Format("could not register field indexers for platformadmin controller, %v", err)) return nil } @@ -701,7 +701,7 @@ func (r *ReconcilePlatformAdmin) readFramework(ctx context.Context, platformAdmi return nil }) if err != nil { - klog.Errorf(Format("Failed to remove finalizer of framework configmap for PlatformAdmin %s/%s", platformAdmin.Namespace, platformAdmin.Name)) + klog.Errorf(Format("could not remove finalizer of framework configmap for PlatformAdmin %s/%s", platformAdmin.Namespace, platformAdmin.Name)) return nil, err } } else { @@ -716,7 +716,7 @@ func (r *ReconcilePlatformAdmin) readFramework(ctx context.Context, platformAdmi return controllerutil.SetOwnerReference(platformAdmin, cm, r.scheme) }) if err != nil { - klog.Errorf(Format("Failed to add owner reference of framework configmap for PlatformAdmin %s/%s", platformAdmin.Namespace, platformAdmin.Name)) + klog.Errorf(Format("could not add owner reference of framework configmap for PlatformAdmin %s/%s", platformAdmin.Namespace, platformAdmin.Name)) return nil, err } } @@ -729,7 +729,7 @@ func (r *ReconcilePlatformAdmin) writeFramework(ctx context.Context, platformAdm // For better serialization, the serialization method of the Kubernetes runtime library is used data, err := runtime.Encode(r.yamlSerializer, platformAdminFramework) if err != nil { - klog.Errorf(Format("Failed to marshal framework for PlatformAdmin %s/%s", platformAdmin.Namespace, platformAdmin.Name)) + klog.Errorf(Format("could not marshal framework for PlatformAdmin %s/%s", platformAdmin.Namespace, platformAdmin.Name)) return err } @@ -756,7 +756,7 @@ func (r *ReconcilePlatformAdmin) writeFramework(ctx context.Context, platformAdm return controllerutil.SetOwnerReference(platformAdmin, cm, r.Scheme()) }) if err != nil { - klog.Errorf(Format("Failed to write framework configmap for PlatformAdmin %s/%s", platformAdmin.Namespace, platformAdmin.Name)) + klog.Errorf(Format("could not write framework configmap for PlatformAdmin %s/%s", platformAdmin.Namespace, platformAdmin.Name)) return err } return nil @@ -779,7 +779,7 @@ func (r *ReconcilePlatformAdmin) initFramework(ctx context.Context, platformAdmi // For better serialization, the serialization method of the Kubernetes runtime library is used data, err := runtime.Encode(r.yamlSerializer, platformAdminFramework) if err != nil { - klog.Errorf(Format("Failed to marshal framework for PlatformAdmin %s/%s", platformAdmin.Namespace, platformAdmin.Name)) + klog.Errorf(Format("could not marshal framework for PlatformAdmin %s/%s", platformAdmin.Namespace, platformAdmin.Name)) return err } @@ -802,7 +802,7 @@ func (r *ReconcilePlatformAdmin) initFramework(ctx context.Context, platformAdmi return controllerutil.SetOwnerReference(platformAdmin, cm, r.Scheme()) }) if err != nil { - klog.Errorf(Format("Failed to init framework configmap for PlatformAdmin 
%s/%s", platformAdmin.Namespace, platformAdmin.Name)) + klog.Errorf(Format("could not init framework configmap for PlatformAdmin %s/%s", platformAdmin.Namespace, platformAdmin.Name)) return err } return nil diff --git a/pkg/yurtmanager/controller/raven/dns/dns_controller.go b/pkg/yurtmanager/controller/raven/dns/dns_controller.go index 88cc5990d0d..8ffe41129bb 100644 --- a/pkg/yurtmanager/controller/raven/dns/dns_controller.go +++ b/pkg/yurtmanager/controller/raven/dns/dns_controller.go @@ -125,7 +125,7 @@ func (r *ReconcileDns) Reconcile(ctx context.Context, req reconcile.Request) (re } else { svc, err := r.getService(ctx, types.NamespacedName{Namespace: util.WorkingNamespace, Name: util.GatewayProxyInternalService}) if err != nil && !apierrors.IsNotFound(err) { - klog.V(2).Infof(Format("failed to get service %s/%s", util.WorkingNamespace, util.GatewayProxyInternalService)) + klog.V(2).Infof(Format("could not get service %s/%s", util.WorkingNamespace, util.GatewayProxyInternalService)) return reconcile.Result{Requeue: true, RequeueAfter: 2 * time.Second}, err } if apierrors.IsNotFound(err) || svc.DeletionTimestamp != nil { @@ -146,12 +146,12 @@ func (r *ReconcileDns) Reconcile(ctx context.Context, req reconcile.Request) (re nodeList := new(corev1.NodeList) err = r.Client.List(ctx, nodeList, &client.ListOptions{}) if err != nil { - return reconcile.Result{Requeue: true, RequeueAfter: 2 * time.Second}, fmt.Errorf("failed to list node, error %s", err.Error()) + return reconcile.Result{Requeue: true, RequeueAfter: 2 * time.Second}, fmt.Errorf("could not list node, error %s", err.Error()) } cm.Data[util.ProxyNodesKey] = buildDNSRecords(nodeList, enableProxy, proxyAddress) err = r.updateDNS(cm) if err != nil { - return reconcile.Result{Requeue: true, RequeueAfter: 2 * time.Second}, fmt.Errorf("failed to update configmap %s/%s, error %s", + return reconcile.Result{Requeue: true, RequeueAfter: 2 * time.Second}, fmt.Errorf("could not update configmap %s/%s, error %s", cm.GetNamespace(), cm.GetName(), err.Error()) } return reconcile.Result{}, nil @@ -168,7 +168,7 @@ func (r ReconcileDns) getProxyDNS(ctx context.Context, objKey client.ObjectKey) klog.Errorf(Format(err.Error())) } } else { - klog.Errorf(Format("failed to get configmap %s, error %s", objKey.String(), err.Error())) + klog.Errorf(Format("could not get configmap %s, error %s", objKey.String(), err.Error())) } return false, nil } @@ -176,7 +176,7 @@ func (r ReconcileDns) getProxyDNS(ctx context.Context, objKey client.ObjectKey) }) if waitErr != nil { - return nil, fmt.Errorf("failed to get ConfigMap %s, error %s", objKey.String(), waitErr.Error()) + return nil, fmt.Errorf("could not get ConfigMap %s, error %s", objKey.String(), waitErr.Error()) } return cm.DeepCopy(), nil } @@ -193,7 +193,7 @@ func (r *ReconcileDns) buildRavenDNSConfigMap() error { } err := r.Client.Create(context.TODO(), cm, &client.CreateOptions{}) if err != nil { - return fmt.Errorf("failed to create ConfigMap %s/%s, error %s", cm.GetNamespace(), cm.GetName(), err.Error()) + return fmt.Errorf("could not create ConfigMap %s/%s, error %s", cm.GetNamespace(), cm.GetName(), err.Error()) } return nil } @@ -210,7 +210,7 @@ func (r *ReconcileDns) getService(ctx context.Context, objectKey client.ObjectKe func (r *ReconcileDns) updateDNS(cm *corev1.ConfigMap) error { err := r.Client.Update(context.TODO(), cm, &client.UpdateOptions{}) if err != nil { - return fmt.Errorf("failed to update configmap %s/%s, %s", cm.GetNamespace(), cm.GetName(), err.Error()) + return 
fmt.Errorf("could not update configmap %s/%s, %s", cm.GetNamespace(), cm.GetName(), err.Error()) } return nil } @@ -228,7 +228,7 @@ func buildDNSRecords(nodeList *corev1.NodeList, needProxy bool, proxyIp string) if !needProxy { ip, err = getHostIP(&node) if err != nil { - klog.Errorf(Format("failed to parse node address for %s, %s", node.Name, err.Error())) + klog.Errorf(Format("could not parse node address for %s, %s", node.Name, err.Error())) continue } } diff --git a/pkg/yurtmanager/controller/raven/dns/dns_enqueue_handlers.go b/pkg/yurtmanager/controller/raven/dns/dns_enqueue_handlers.go index 23a29ceb436..624e1e7ab4c 100644 --- a/pkg/yurtmanager/controller/raven/dns/dns_enqueue_handlers.go +++ b/pkg/yurtmanager/controller/raven/dns/dns_enqueue_handlers.go @@ -30,11 +30,11 @@ type EnqueueRequestForServiceEvent struct{} func (h *EnqueueRequestForServiceEvent) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { svc, ok := e.Object.(*corev1.Service) if !ok { - klog.Error(Format("fail to assert runtime Object to v1.Service")) + klog.Error(Format("could not assert runtime Object to v1.Service")) return } if svc.Spec.ClusterIP == "" { - klog.Error(Format("failed to get cluster IP %s/%s", svc.Namespace, svc.Name)) + klog.Error(Format("could not get cluster IP %s/%s", svc.Namespace, svc.Name)) return } @@ -45,12 +45,12 @@ func (h *EnqueueRequestForServiceEvent) Create(e event.CreateEvent, q workqueue. func (h *EnqueueRequestForServiceEvent) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { newSvc, ok := e.ObjectNew.(*corev1.Service) if !ok { - klog.Error(Format("fail to assert runtime Object to v1.Service")) + klog.Error(Format("could not assert runtime Object to v1.Service")) return } oldSvc, ok := e.ObjectOld.(*corev1.Service) if !ok { - klog.Error(Format("fail to assert runtime Object to v1.Service")) + klog.Error(Format("could not assert runtime Object to v1.Service")) return } if newSvc.Spec.ClusterIP != oldSvc.Spec.ClusterIP { @@ -62,7 +62,7 @@ func (h *EnqueueRequestForServiceEvent) Update(e event.UpdateEvent, q workqueue. 
func (h *EnqueueRequestForServiceEvent) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { _, ok := e.Object.(*corev1.Service) if !ok { - klog.Error(Format("fail to assert runtime Object to v1.Service")) + klog.Error(Format("could not assert runtime Object to v1.Service")) return } klog.V(2).Infof(Format("enqueue configmap %s/%s due to service update event", util.WorkingNamespace, util.RavenProxyNodesConfig)) @@ -79,7 +79,7 @@ type EnqueueRequestForNodeEvent struct{} func (h *EnqueueRequestForNodeEvent) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { _, ok := e.Object.(*corev1.Node) if !ok { - klog.Error(Format("fail to assert runtime Object to v1.Node")) + klog.Error(Format("could not assert runtime Object to v1.Node")) return } klog.V(2).Infof(Format("enqueue configmap %s/%s due to node create event", util.WorkingNamespace, util.RavenProxyNodesConfig)) @@ -93,7 +93,7 @@ func (h *EnqueueRequestForNodeEvent) Update(e event.UpdateEvent, q workqueue.Rat func (h *EnqueueRequestForNodeEvent) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { _, ok := e.Object.(*corev1.Node) if !ok { - klog.Error(Format("fail to assert runtime Object to v1.Node")) + klog.Error(Format("could not assert runtime Object to v1.Node")) return } klog.V(2).Infof(Format("enqueue configmap %s/%s due to node delete event", util.WorkingNamespace, util.RavenProxyNodesConfig)) diff --git a/pkg/yurtmanager/controller/raven/gatewayinternalservice/gateway_internal_service_enqueue_handlers.go b/pkg/yurtmanager/controller/raven/gatewayinternalservice/gateway_internal_service_enqueue_handlers.go index 5b2251a5524..d1a0f9e1e04 100644 --- a/pkg/yurtmanager/controller/raven/gatewayinternalservice/gateway_internal_service_enqueue_handlers.go +++ b/pkg/yurtmanager/controller/raven/gatewayinternalservice/gateway_internal_service_enqueue_handlers.go @@ -33,7 +33,7 @@ type EnqueueRequestForGatewayEvent struct{} func (h *EnqueueRequestForGatewayEvent) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { gw, ok := e.Object.(*ravenv1beta1.Gateway) if !ok { - klog.Error(Format("fail to assert runtime Object %s/%s to v1beta1.Gateway", e.Object.GetNamespace(), e.Object.GetName())) + klog.Error(Format("could not assert runtime Object %s/%s to v1beta1.Gateway", e.Object.GetNamespace(), e.Object.GetName())) return } if gw.Spec.ExposeType == "" { @@ -46,12 +46,12 @@ func (h *EnqueueRequestForGatewayEvent) Create(e event.CreateEvent, q workqueue. func (h *EnqueueRequestForGatewayEvent) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { newGw, ok := e.ObjectNew.(*ravenv1beta1.Gateway) if !ok { - klog.Error(Format("fail to assert runtime Object %s/%s to v1beta1.Gateway", e.ObjectNew.GetNamespace(), e.ObjectNew.GetName())) + klog.Error(Format("could not assert runtime Object %s/%s to v1beta1.Gateway", e.ObjectNew.GetNamespace(), e.ObjectNew.GetName())) return } oldGw, ok := e.ObjectOld.(*ravenv1beta1.Gateway) if !ok { - klog.Error(Format("fail to assert runtime Object %s/%s to v1beta1.Gateway", e.ObjectOld.GetNamespace(), e.ObjectOld.GetName())) + klog.Error(Format("could not assert runtime Object %s/%s to v1beta1.Gateway", e.ObjectOld.GetNamespace(), e.ObjectOld.GetName())) return } if oldGw.Spec.ExposeType == "" && newGw.Spec.ExposeType == "" { @@ -64,7 +64,7 @@ func (h *EnqueueRequestForGatewayEvent) Update(e event.UpdateEvent, q workqueue. 
func (h *EnqueueRequestForGatewayEvent) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { gw, ok := e.Object.(*ravenv1beta1.Gateway) if !ok { - klog.Error(Format("fail to assert runtime Object %s/%s to v1beta1.Gateway", e.Object.GetNamespace(), e.Object.GetName())) + klog.Error(Format("could not assert runtime Object %s/%s to v1beta1.Gateway", e.Object.GetNamespace(), e.Object.GetName())) return } if gw.Spec.ExposeType == "" { @@ -83,7 +83,7 @@ type EnqueueRequestForConfigEvent struct{} func (h *EnqueueRequestForConfigEvent) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { cm, ok := e.Object.(*corev1.ConfigMap) if !ok { - klog.Error(Format("fail to assert runtime Object %s/%s to v1.Configmap", e.Object.GetNamespace(), e.Object.GetName())) + klog.Error(Format("could not assert runtime Object %s/%s to v1.Configmap", e.Object.GetNamespace(), e.Object.GetName())) return } if cm.Data == nil { @@ -108,12 +108,12 @@ func (h *EnqueueRequestForConfigEvent) Create(e event.CreateEvent, q workqueue.R func (h *EnqueueRequestForConfigEvent) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { newCm, ok := e.ObjectNew.(*corev1.ConfigMap) if !ok { - klog.Error(Format("fail to assert runtime Object %s/%s to v1.Configmap", e.ObjectNew.GetNamespace(), e.ObjectNew.GetName())) + klog.Error(Format("could not assert runtime Object %s/%s to v1.Configmap", e.ObjectNew.GetNamespace(), e.ObjectNew.GetName())) return } oldCm, ok := e.ObjectOld.(*corev1.ConfigMap) if !ok { - klog.Error(Format("fail to assert runtime Object %s/%s to v1.Configmap", e.ObjectOld.GetNamespace(), e.ObjectOld.GetName())) + klog.Error(Format("could not assert runtime Object %s/%s to v1.Configmap", e.ObjectOld.GetNamespace(), e.ObjectOld.GetName())) return } _, newInsecurePort, newErr := net.SplitHostPort(newCm.Data[util.ProxyServerInsecurePortKey]) diff --git a/pkg/yurtmanager/controller/raven/gatewaypickup/gateway_pickup_enqueue_handlers.go b/pkg/yurtmanager/controller/raven/gatewaypickup/gateway_pickup_enqueue_handlers.go index ed9fc81066d..d2acf9016f1 100644 --- a/pkg/yurtmanager/controller/raven/gatewaypickup/gateway_pickup_enqueue_handlers.go +++ b/pkg/yurtmanager/controller/raven/gatewaypickup/gateway_pickup_enqueue_handlers.go @@ -36,7 +36,7 @@ type EnqueueGatewayForNode struct{} func (e *EnqueueGatewayForNode) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { node, ok := evt.Object.(*corev1.Node) if !ok { - klog.Error(Format("fail to assert runtime Object to v1.Node")) + klog.Error(Format("could not assert runtime Object to v1.Node")) return } klog.V(5).Infof(Format("will enqueue gateway as node(%s) has been created", @@ -52,13 +52,13 @@ func (e *EnqueueGatewayForNode) Create(evt event.CreateEvent, q workqueue.RateLi func (e *EnqueueGatewayForNode) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { newNode, ok := evt.ObjectNew.(*corev1.Node) if !ok { - klog.Errorf(Format("Fail to assert runtime Object(%s) to v1.Node", + klog.Errorf(Format("could not assert runtime Object(%s) to v1.Node", evt.ObjectNew.GetName())) return } oldNode, ok := evt.ObjectOld.(*corev1.Node) if !ok { - klog.Errorf(Format("fail to assert runtime Object(%s) to v1.Node", + klog.Errorf(Format("could not assert runtime Object(%s) to v1.Node", evt.ObjectOld.GetName())) return } @@ -83,7 +83,7 @@ func (e *EnqueueGatewayForNode) Update(evt event.UpdateEvent, q workqueue.RateLi func (e *EnqueueGatewayForNode) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { node, ok := 
evt.Object.(*corev1.Node) if !ok { - klog.Error(Format("Fail to assert runtime Object to v1.Node")) + klog.Error(Format("could not assert runtime Object to v1.Node")) return } @@ -109,12 +109,12 @@ type EnqueueGatewayForRavenConfig struct { func (e *EnqueueGatewayForRavenConfig) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { _, ok := evt.Object.(*corev1.ConfigMap) if !ok { - klog.Error(Format("Fail to assert runtime Object to v1.ConfigMap")) + klog.Error(Format("could not assert runtime Object to v1.ConfigMap")) return } klog.V(2).Infof(Format("Will config all gateway as raven-cfg has been created")) if err := e.enqueueGateways(q); err != nil { - klog.Error(Format("failed to config all gateway, error %s", err.Error())) + klog.Error(Format("could not config all gateway, error %s", err.Error())) return } } @@ -122,20 +122,20 @@ func (e *EnqueueGatewayForRavenConfig) Create(evt event.CreateEvent, q workqueue func (e *EnqueueGatewayForRavenConfig) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { oldCm, ok := evt.ObjectOld.(*corev1.ConfigMap) if !ok { - klog.Error(Format("Fail to assert runtime Object to v1.ConfigMap")) + klog.Error(Format("could not assert runtime Object to v1.ConfigMap")) return } newCm, ok := evt.ObjectNew.(*corev1.ConfigMap) if !ok { - klog.Error(Format("Fail to assert runtime Object to v1.ConfigMap")) + klog.Error(Format("could not assert runtime Object to v1.ConfigMap")) return } if oldCm.Data[util.RavenEnableProxy] != newCm.Data[util.RavenEnableProxy] { klog.V(2).Infof(Format("Will config all gateway as raven-cfg has been updated")) if err := e.enqueueGateways(q); err != nil { - klog.Error(Format("failed to config all gateway, error %s", err.Error())) + klog.Error(Format("could not config all gateway, error %s", err.Error())) return } } @@ -143,7 +143,7 @@ func (e *EnqueueGatewayForRavenConfig) Update(evt event.UpdateEvent, q workqueue if oldCm.Data[util.RavenEnableTunnel] != newCm.Data[util.RavenEnableTunnel] { klog.V(2).Infof(Format("Will config all gateway as raven-cfg has been updated")) if err := e.enqueueGateways(q); err != nil { - klog.Error(Format("failed to config all gateway, error %s", err.Error())) + klog.Error(Format("could not config all gateway, error %s", err.Error())) return } } @@ -152,12 +152,12 @@ func (e *EnqueueGatewayForRavenConfig) Update(evt event.UpdateEvent, q workqueue func (e *EnqueueGatewayForRavenConfig) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { _, ok := evt.Object.(*corev1.ConfigMap) if !ok { - klog.Error(Format("Fail to assert runtime Object to v1.ConfigMap")) + klog.Error(Format("could not assert runtime Object to v1.ConfigMap")) return } klog.V(2).Infof(Format("Will config all gateway as raven-cfg has been deleted")) if err := e.enqueueGateways(q); err != nil { - klog.Error(Format("failed to config all gateway, error %s", err.Error())) + klog.Error(Format("could not config all gateway, error %s", err.Error())) return } } diff --git a/pkg/yurtmanager/controller/raven/gatewaypublicservice/gateway_public_service_controller.go b/pkg/yurtmanager/controller/raven/gatewaypublicservice/gateway_public_service_controller.go index 1593eff2847..958cd4948da 100644 --- a/pkg/yurtmanager/controller/raven/gatewaypublicservice/gateway_public_service_controller.go +++ b/pkg/yurtmanager/controller/raven/gatewaypublicservice/gateway_public_service_controller.go @@ -146,7 +146,7 @@ func (r *ReconcileService) Reconcile(ctx context.Context, req reconcile.Request) enableTunnel = false enableProxy = 
false } else { - klog.Error(Format("failed to get gateway %s, error %s", req.Name, err.Error())) + klog.Error(Format("could not get gateway %s, error %s", req.Name, err.Error())) return reconcile.Result{}, err } } @@ -190,13 +190,13 @@ func (r *ReconcileService) reconcileService(ctx context.Context, gw *ravenv1beta klog.V(2).Info(Format("start manage proxy service for gateway %s", gw.GetName())) defer klog.V(2).Info(Format("finish manage proxy service for gateway %s", gw.GetName())) if err := r.manageService(ctx, gw, ravenv1beta1.Proxy, record); err != nil { - return fmt.Errorf("failed to manage service for proxy server %s", err.Error()) + return fmt.Errorf("could not manage service for proxy server %s", err.Error()) } } else { klog.V(2).Info(Format("start clear proxy service for gateway %s", gw.GetName())) defer klog.V(2).Info(Format("finish clear proxy service for gateway %s", gw.GetName())) if err := r.clearService(ctx, gw.GetName(), ravenv1beta1.Proxy); err != nil { - return fmt.Errorf("failed to clear service for proxy server %s", err.Error()) + return fmt.Errorf("could not clear service for proxy server %s", err.Error()) } } @@ -204,13 +204,13 @@ func (r *ReconcileService) reconcileService(ctx context.Context, gw *ravenv1beta klog.V(2).Info(Format("start manage tunnel service for gateway %s", gw.GetName())) defer klog.V(2).Info(Format("finish manage tunnel service for gateway %s", gw.GetName())) if err := r.manageService(ctx, gw, ravenv1beta1.Tunnel, record); err != nil { - return fmt.Errorf("failed to manage service for tunnel server %s", err.Error()) + return fmt.Errorf("could not manage service for tunnel server %s", err.Error()) } } else { klog.V(2).Info(Format("start clear tunnel service for gateway %s", gw.GetName())) defer klog.V(2).Info(Format("finish clear tunnel service for gateway %s", gw.GetName())) if err := r.clearService(ctx, gw.GetName(), ravenv1beta1.Tunnel); err != nil { - return fmt.Errorf("failed to clear service for tunnel server %s", err.Error()) + return fmt.Errorf("could not clear service for tunnel server %s", err.Error()) } } return nil @@ -221,26 +221,26 @@ func (r *ReconcileService) reconcileEndpoints(ctx context.Context, gw *ravenv1be klog.V(2).Info(Format("start manage proxy service endpoints for gateway %s", gw.GetName())) defer klog.V(2).Info(Format("finish manage proxy service endpoints for gateway %s", gw.GetName())) if err := r.manageEndpoints(ctx, gw, ravenv1beta1.Proxy, record); err != nil { - return fmt.Errorf("failed to manage endpoints for proxy server %s", err.Error()) + return fmt.Errorf("could not manage endpoints for proxy server %s", err.Error()) } } else { klog.V(2).Info(Format("start clear proxy service endpoints for gateway %s", gw.GetName())) defer klog.V(2).Info(Format("finish clear proxy service endpoints for gateway %s", gw.GetName())) if err := r.clearEndpoints(ctx, gw.GetName(), ravenv1beta1.Proxy); err != nil { - return fmt.Errorf("failed to clear endpoints for proxy server %s", err.Error()) + return fmt.Errorf("could not clear endpoints for proxy server %s", err.Error()) } } if enableTunnel { klog.V(2).Info(Format("start manage tunnel service endpoints for gateway %s", gw.GetName())) defer klog.V(2).Info(Format("finish manage tunnel service endpoints for gateway %s", gw.GetName())) if err := r.manageEndpoints(ctx, gw, ravenv1beta1.Tunnel, record); err != nil { - return fmt.Errorf("failed to manage endpoints for tunnel server %s", err.Error()) + return fmt.Errorf("could not manage endpoints for tunnel server %s", err.Error()) } } 
else { klog.V(2).Info(Format("start clear tunnel service endpoints for gateway %s", gw.GetName())) defer klog.V(2).Info(Format("finish clear tunnel service endpoints for gateway %s", gw.GetName())) if err := r.clearEndpoints(ctx, gw.GetName(), ravenv1beta1.Tunnel); err != nil { - return fmt.Errorf("failed to clear endpoints for tunnel server %s", err.Error()) + return fmt.Errorf("could not clear endpoints for tunnel server %s", err.Error()) } } return nil @@ -249,13 +249,13 @@ func (r *ReconcileService) reconcileEndpoints(ctx context.Context, gw *ravenv1be func (r *ReconcileService) clearService(ctx context.Context, gatewayName, gatewayType string) error { svcList, err := r.listService(ctx, gatewayName, gatewayType) if err != nil { - return fmt.Errorf("failed to list service for gateway %s", gatewayName) + return fmt.Errorf("could not list service for gateway %s", gatewayName) } for _, svc := range svcList.Items { err := r.Delete(ctx, svc.DeepCopy()) if err != nil { r.recorder.Event(svc.DeepCopy(), corev1.EventTypeWarning, ServiceDeleteFailed, - fmt.Sprintf("The gateway %s %s server is not need to exposed by loadbalancer, failed to delete service %s/%s", + fmt.Sprintf("The gateway %s %s server does not need to be exposed by loadbalancer, could not delete service %s/%s", gatewayName, gatewayType, svc.GetNamespace(), svc.GetName())) continue } @@ -266,13 +266,13 @@ func (r *ReconcileService) clearService(ctx context.Context, gatewayName, gatewa func (r *ReconcileService) clearEndpoints(ctx context.Context, gatewayName, gatewayType string) error { epsList, err := r.listEndpoints(ctx, gatewayName, gatewayType) if err != nil { - return fmt.Errorf("failed to list endpoints for gateway %s", gatewayName) + return fmt.Errorf("could not list endpoints for gateway %s", gatewayName) } for _, eps := range epsList.Items { err := r.Delete(ctx, eps.DeepCopy()) if err != nil { r.recorder.Event(eps.DeepCopy(), corev1.EventTypeWarning, ServiceDeleteFailed, - fmt.Sprintf("The gateway %s %s server is not need to exposed by loadbalancer, failed to delete endpoints %s/%s", + fmt.Sprintf("The gateway %s %s server does not need to be exposed by loadbalancer, could not delete endpoints %s/%s", gatewayName, gatewayType, eps.GetNamespace(), eps.GetName())) continue } @@ -473,7 +473,7 @@ func (r *ReconcileService) getEndpointsAddress(ctx context.Context, name string) var node corev1.Node err := r.Get(ctx, types.NamespacedName{Name: name}, &node) if err != nil { - klog.Errorf(Format("failed to get node %s for get active endpoints address, error %s", name, err.Error())) + klog.Errorf(Format("could not get node %s to get active endpoints address, error %s", name, err.Error())) return nil, err } return &corev1.EndpointAddress{NodeName: func(n corev1.Node) *string { return &n.Name }(node), IP: util.GetNodeInternalIP(node)}, nil diff --git a/pkg/yurtmanager/controller/raven/gatewaypublicservice/gateway_public_service_enqueue_handlers.go b/pkg/yurtmanager/controller/raven/gatewaypublicservice/gateway_public_service_enqueue_handlers.go index 02870446bb4..0cf1528d800 100644 --- a/pkg/yurtmanager/controller/raven/gatewaypublicservice/gateway_public_service_enqueue_handlers.go +++ b/pkg/yurtmanager/controller/raven/gatewaypublicservice/gateway_public_service_enqueue_handlers.go @@ -35,7 +35,7 @@ type EnqueueRequestForGatewayEvent struct{} func (h *EnqueueRequestForGatewayEvent) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { gw, ok := e.Object.(*ravenv1beta1.Gateway) if !ok { - klog.Error(Format("fail to assert runtime
Object %s/%s to v1beta1.Gateway,", e.Object.GetNamespace(), e.Object.GetName())) + klog.Error(Format("could not assert runtime Object %s/%s to v1beta1.Gateway,", e.Object.GetNamespace(), e.Object.GetName())) return } if gw.Spec.ExposeType != ravenv1beta1.ExposeTypeLoadBalancer { @@ -48,12 +48,12 @@ func (h *EnqueueRequestForGatewayEvent) Create(e event.CreateEvent, q workqueue. func (h *EnqueueRequestForGatewayEvent) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { newGw, ok := e.ObjectNew.(*ravenv1beta1.Gateway) if !ok { - klog.Error(Format("fail to assert runtime Object %s/%s to v1beta1.Gateway,", e.ObjectNew.GetNamespace(), e.ObjectNew.GetName())) + klog.Error(Format("could not assert runtime Object %s/%s to v1beta1.Gateway,", e.ObjectNew.GetNamespace(), e.ObjectNew.GetName())) return } oldGw, ok := e.ObjectOld.(*ravenv1beta1.Gateway) if !ok { - klog.Error(Format("fail to assert runtime Object %s/%s to v1beta1.Gateway,", e.ObjectOld.GetNamespace(), e.ObjectOld.GetName())) + klog.Error(Format("could not assert runtime Object %s/%s to v1beta1.Gateway,", e.ObjectOld.GetNamespace(), e.ObjectOld.GetName())) return } if needUpdate(newGw, oldGw) { @@ -65,7 +65,7 @@ func (h *EnqueueRequestForGatewayEvent) Update(e event.UpdateEvent, q workqueue. func (h *EnqueueRequestForGatewayEvent) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { gw, ok := e.Object.(*ravenv1beta1.Gateway) if !ok { - klog.Error(Format("fail to assert runtime Object %s/%s to v1beta1.Gateway,", e.Object.GetNamespace(), e.Object.GetName())) + klog.Error(Format("could not assert runtime Object %s/%s to v1beta1.Gateway,", e.Object.GetNamespace(), e.Object.GetName())) return } if gw.Spec.ExposeType != ravenv1beta1.ExposeTypeLoadBalancer { @@ -98,7 +98,7 @@ type EnqueueRequestForConfigEvent struct { func (h *EnqueueRequestForConfigEvent) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { cm, ok := e.Object.(*corev1.ConfigMap) if !ok { - klog.Error(Format("fail to assert runtime Object %s/%s to v1.Configmap,", e.Object.GetNamespace(), e.Object.GetName())) + klog.Error(Format("could not assert runtime Object %s/%s to v1.Configmap,", e.Object.GetNamespace(), e.Object.GetName())) return } if cm.Data == nil { @@ -117,12 +117,12 @@ func (h *EnqueueRequestForConfigEvent) Create(e event.CreateEvent, q workqueue.R func (h *EnqueueRequestForConfigEvent) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { newCm, ok := e.ObjectNew.(*corev1.ConfigMap) if !ok { - klog.Error(Format("fail to assert runtime Object %s/%s to v1.Configmap,", e.ObjectNew.GetNamespace(), e.ObjectNew.GetName())) + klog.Error(Format("could not assert runtime Object %s/%s to v1.Configmap,", e.ObjectNew.GetNamespace(), e.ObjectNew.GetName())) return } oldCm, ok := e.ObjectOld.(*corev1.ConfigMap) if !ok { - klog.Error(Format("fail to assert runtime Object %s/%s to v1.Configmap,", e.ObjectOld.GetNamespace(), e.ObjectOld.GetName())) + klog.Error(Format("could not assert runtime Object %s/%s to v1.Configmap,", e.ObjectOld.GetNamespace(), e.ObjectOld.GetName())) return } _, newProxyPort, newErr := net.SplitHostPort(newCm.Data[util.ProxyServerExposedPortKey]) diff --git a/pkg/yurtmanager/controller/raven/util/util.go b/pkg/yurtmanager/controller/raven/util/util.go index 4911f9765ca..68c67479e43 100644 --- a/pkg/yurtmanager/controller/raven/util/util.go +++ b/pkg/yurtmanager/controller/raven/util/util.go @@ -99,7 +99,7 @@ func HashObject(o interface{}) string { func PrettyYaml(obj interface{}) string { bs, err := 
yaml.Marshal(obj) if err != nil { - klog.Errorf("failed to parse yaml, %v", err.Error()) + klog.Errorf("could not parse yaml, %v", err.Error()) } return string(bs) } diff --git a/pkg/yurtmanager/controller/servicetopology/endpoints/endpoints_enqueue_handlers.go b/pkg/yurtmanager/controller/servicetopology/endpoints/endpoints_enqueue_handlers.go index febf3d35a51..b172f5a713b 100644 --- a/pkg/yurtmanager/controller/servicetopology/endpoints/endpoints_enqueue_handlers.go +++ b/pkg/yurtmanager/controller/servicetopology/endpoints/endpoints_enqueue_handlers.go @@ -43,13 +43,13 @@ func (e *EnqueueEndpointsForService) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { oldSvc, ok := evt.ObjectOld.(*corev1.Service) if !ok { - klog.Errorf(Format("Fail to assert runtime Object(%s) to v1.Service", + klog.Errorf(Format("could not assert runtime Object(%s) to v1.Service", evt.ObjectOld.GetName())) return } newSvc, ok := evt.ObjectNew.(*corev1.Service) if !ok { - klog.Errorf(Format("Fail to assert runtime Object(%s) to v1.Service", + klog.Errorf(Format("could not assert runtime Object(%s) to v1.Service", evt.ObjectNew.GetName())) return } @@ -74,7 +74,7 @@ func (e *EnqueueEndpointsForService) enqueueEndpointsForSvc(newSvc *corev1.Servi for _, key := range keys { ns, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - klog.Errorf("failed to split key %s, %v", key, err) + klog.Errorf("could not split key %s, %v", key, err) continue } q.AddRateLimited(reconcile.Request{ diff --git a/pkg/yurtmanager/controller/servicetopology/endpointslice/endpointslice_enqueue_handlers.go b/pkg/yurtmanager/controller/servicetopology/endpointslice/endpointslice_enqueue_handlers.go index 234af1bd95d..9d08f380b85 100644 --- a/pkg/yurtmanager/controller/servicetopology/endpointslice/endpointslice_enqueue_handlers.go +++ b/pkg/yurtmanager/controller/servicetopology/endpointslice/endpointslice_enqueue_handlers.go @@ -43,13 +43,13 @@ func (e *EnqueueEndpointsliceForService) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { oldSvc, ok := evt.ObjectOld.(*corev1.Service) if !ok { - klog.Errorf(Format("Fail to assert runtime Object(%s) to v1.Service", + klog.Errorf(Format("could not assert runtime Object(%s) to v1.Service", evt.ObjectOld.GetName())) return } newSvc, ok := evt.ObjectNew.(*corev1.Service) if !ok { - klog.Errorf(Format("Fail to assert runtime Object(%s) to v1.Service", + klog.Errorf(Format("could not assert runtime Object(%s) to v1.Service", evt.ObjectNew.GetName())) return } @@ -74,7 +74,7 @@ func (e *EnqueueEndpointsliceForService) enqueueEndpointsliceForSvc(newSvc *core for _, key := range keys { ns, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - klog.Errorf("failed to split key %s, %v", key, err) + klog.Errorf("could not split key %s, %v", key, err) continue } q.AddRateLimited(reconcile.Request{ diff --git a/pkg/yurtmanager/controller/util/node/controller_utils.go b/pkg/yurtmanager/controller/util/node/controller_utils.go index 97f7b4cd9ee..e9a1e92e201 100644 --- a/pkg/yurtmanager/controller/util/node/controller_utils.go +++ b/pkg/yurtmanager/controller/util/node/controller_utils.go @@ -170,7 +170,7 @@ func MarkPodsNotReady(ctx context.Context, c client.Client, recorder record.Even // There is nothing left to do with this pod. 
continue } - klog.InfoS("Failed to update status for pod", "pod", klog.KObj(pod), "err", err) + klog.InfoS("could not update status for pod", "pod", klog.KObj(pod), "err", err) errs = append(errs, err) } // record NodeNotReady event after updateStatus to make sure pod still exists @@ -303,7 +303,7 @@ func AddOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName for _, taint := range taints { curNewNode, ok, err := taintutils.AddOrUpdateTaint(oldNodeCopy, taint) if err != nil { - return fmt.Errorf("failed to update taint of node") + return fmt.Errorf("could not update taint of node") } updated = updated || ok newNode = curNewNode @@ -360,7 +360,7 @@ func RemoveTaintOffNode(ctx context.Context, c clientset.Interface, nodeName str for _, taint := range taints { curNewNode, ok, err := taintutils.RemoveTaint(oldNodeCopy, taint) if err != nil { - return fmt.Errorf("failed to remove taint of node") + return fmt.Errorf("could not remove taint of node") } updated = updated || ok newNode = curNewNode @@ -382,7 +382,7 @@ func PatchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string oldNodeNoRV.ResourceVersion = "" oldDataNoRV, err := json.Marshal(&oldNodeNoRV) if err != nil { - return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNodeNoRV, nodeName, err) + return fmt.Errorf("could not marshal old node %#v for node %q: %v", oldNodeNoRV, nodeName, err) } newTaints := newNode.Spec.Taints @@ -390,12 +390,12 @@ func PatchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string newNodeClone.Spec.Taints = newTaints newData, err := json.Marshal(newNodeClone) if err != nil { - return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err) + return fmt.Errorf("could not marshal new node %#v for node %q: %v", newNodeClone, nodeName, err) } patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldDataNoRV, newData, v1.Node{}) if err != nil { - return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err) + return fmt.Errorf("could not create patch for node %q: %v", nodeName, err) } _, err = c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) @@ -430,18 +430,18 @@ func addOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la oldData, err := json.Marshal(node) if err != nil { - return fmt.Errorf("failed to marshal the existing node %#v: %v", node, err) + return fmt.Errorf("could not marshal the existing node %#v: %v", node, err) } newData, err := json.Marshal(newNode) if err != nil { - return fmt.Errorf("failed to marshal the new node %#v: %v", newNode, err) + return fmt.Errorf("could not marshal the new node %#v: %v", newNode, err) } patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{}) if err != nil { - return fmt.Errorf("failed to create a two-way merge patch: %v", err) + return fmt.Errorf("could not create a two-way merge patch: %v", err) } if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { - return fmt.Errorf("failed to patch the node: %v", err) + return fmt.Errorf("could not patch the node: %v", err) } return nil }) diff --git a/pkg/yurtmanager/controller/util/refmanager/ref_manager.go b/pkg/yurtmanager/controller/util/refmanager/ref_manager.go index 4e0a96abc04..dbe2cdc2bcb 100644 --- a/pkg/yurtmanager/controller/util/refmanager/ref_manager.go +++ 
b/pkg/yurtmanager/controller/util/refmanager/ref_manager.go @@ -108,7 +108,7 @@ func (mgr *RefManager) getOwner() (runtime.Object, error) { var getOwner = func(owner metav1.Object, schema *runtime.Scheme, c client.Client) (client.Object, error) { cliObj, ok := owner.(client.Object) if !ok { - return nil, fmt.Errorf("fail to convert %s/%s to runtime object", owner.GetNamespace(), owner.GetName()) + return nil, fmt.Errorf("could not convert %s/%s to runtime object", owner.GetNamespace(), owner.GetName()) } kinds, _, err := schema.ObjectKinds(cliObj) @@ -122,7 +122,7 @@ var getOwner = func(owner metav1.Object, schema *runtime.Scheme, c client.Client } obj, ok := runtimeObj.(client.Object) if !ok { - return nil, fmt.Errorf("fail to convert %s/%s to client object", owner.GetNamespace(), owner.GetName()) + return nil, fmt.Errorf("could not convert %s/%s to client object", owner.GetNamespace(), owner.GetName()) } return obj, c.Get(context.TODO(), client.ObjectKey{Namespace: owner.GetNamespace(), Name: owner.GetName()}, obj) @@ -176,7 +176,7 @@ func (mgr *RefManager) adopt(obj metav1.Object) error { runtimeObj, ok := obj.(client.Object) if !ok { - return fmt.Errorf("can't update Object %v/%v (%v) owner reference: fail to cast to runtime.Object", obj.GetNamespace(), obj.GetName(), obj.GetUID()) + return fmt.Errorf("can't update Object %v/%v (%v) owner reference: could not cast to runtime.Object", obj.GetNamespace(), obj.GetName(), obj.GetUID()) } if err := mgr.updateOwnee(runtimeObj); err != nil { @@ -196,7 +196,7 @@ func (mgr *RefManager) release(obj metav1.Object) error { if idx > -1 { runtimeObj, ok := obj.(client.Object) if !ok { - return fmt.Errorf("can't remove Pod %v/%v (%v) owner reference: fail to cast to runtime.Object", obj.GetNamespace(), obj.GetName(), obj.GetUID()) + return fmt.Errorf("can't remove Pod %v/%v (%v) owner reference: could not cast to runtime.Object", obj.GetNamespace(), obj.GetName(), obj.GetUID()) } obj.SetOwnerReferences(append(obj.GetOwnerReferences()[:idx], obj.GetOwnerReferences()[idx+1:]...)) diff --git a/pkg/yurtmanager/controller/yurtappdaemon/workloadcontroller/deployment_controller.go b/pkg/yurtmanager/controller/yurtappdaemon/workloadcontroller/deployment_controller.go index d61b2194dab..634e95aea08 100644 --- a/pkg/yurtmanager/controller/yurtappdaemon/workloadcontroller/deployment_controller.go +++ b/pkg/yurtmanager/controller/yurtappdaemon/workloadcontroller/deployment_controller.go @@ -52,7 +52,7 @@ func (d *DeploymentControllor) DeleteWorkload(yda *v1alpha1.YurtAppDaemon, load set := load.Spec.Ref.(runtime.Object) cliSet, ok := set.(client.Object) if !ok { - return errors.New("fail to convert runtime.Object to client.Object") + return errors.New("could not convert runtime.Object to client.Object") } return d.Delete(context.TODO(), cliSet, client.PropagationPolicy(metav1.DeletePropagationBackground)) } @@ -146,7 +146,7 @@ func (d *DeploymentControllor) CreateWorkload(yad *v1alpha1.YurtAppDaemon, nodep deploy := appsv1.Deployment{} if err := d.applyTemplate(d.Scheme, yad, nodepool, revision, &deploy); err != nil { - klog.Errorf("YurtAppDaemon[%s/%s] faild to apply template, when create deployment: %v", yad.GetNamespace(), + klog.Errorf("YurtAppDaemon[%s/%s] could not apply template, when create deployment: %v", yad.GetNamespace(), yad.GetName(), err) return err } diff --git a/pkg/yurtmanager/controller/yurtappdaemon/yurtappdaemon_controller.go b/pkg/yurtmanager/controller/yurtappdaemon/yurtappdaemon_controller.go index f6d93554b4c..02ee4ea3d6e 100644 --- 
a/pkg/yurtmanager/controller/yurtappdaemon/yurtappdaemon_controller.go +++ b/pkg/yurtmanager/controller/yurtappdaemon/yurtappdaemon_controller.go @@ -159,7 +159,7 @@ func (r *ReconcileYurtAppDaemon) Reconcile(_ context.Context, request reconcile. currentRevision, updatedRevision, collisionCount, err := r.constructYurtAppDaemonRevisions(instance) if err != nil { - klog.Errorf("Fail to construct controller revision of YurtAppDaemon %s/%s: %s", instance.Namespace, instance.Name, err) + klog.Errorf("could not construct controller revision of YurtAppDaemon %s/%s: %s", instance.Namespace, instance.Name, err) r.recorder.Event(instance.DeepCopy(), corev1.EventTypeWarning, fmt.Sprintf("Failed%s", eventTypeRevisionProvision), err.Error()) return reconcile.Result{}, err } @@ -179,19 +179,19 @@ func (r *ReconcileYurtAppDaemon) Reconcile(_ context.Context, request reconcile. } if control == nil { - r.recorder.Event(instance.DeepCopy(), corev1.EventTypeWarning, fmt.Sprintf("YurtAppDaemon[%s/%s] Fail to get control", instance.Namespace, instance.Name), fmt.Sprintf("fail to find control")) - return reconcile.Result{}, fmt.Errorf("fail to find control") + r.recorder.Event(instance.DeepCopy(), corev1.EventTypeWarning, fmt.Sprintf("YurtAppDaemon[%s/%s] could not get control", instance.Namespace, instance.Name), fmt.Sprintf("could not find control")) + return reconcile.Result{}, fmt.Errorf("could not find control") } currentNPToWorkload, err := r.getNodePoolToWorkLoad(instance, control) if err != nil { - klog.Errorf("YurtAppDaemon[%s/%s] Fail to get nodePoolWorkload, error: %s", instance.Namespace, instance.Name, err) + klog.Errorf("YurtAppDaemon[%s/%s] could not get nodePoolWorkload, error: %s", instance.Namespace, instance.Name, err) return reconcile.Result{}, err } allNameToNodePools, err := r.getNameToNodePools(instance) if err != nil { - klog.Errorf("YurtAppDaemon[%s/%s] Fail to get nameToNodePools, error: %s", instance.Namespace, instance.Name, err) + klog.Errorf("YurtAppDaemon[%s/%s] could not get nameToNodePools, error: %s", instance.Namespace, instance.Name, err) return reconcile.Result{}, err } @@ -247,7 +247,7 @@ func (r *ReconcileYurtAppDaemon) updateYurtAppDaemon(yad *unitv1alpha1.YurtAppDa obj = tmpObj } - klog.Errorf("fail to update YurtAppDaemon %s/%s status: %s", yad.Namespace, yad.Name, updateErr) + klog.Errorf("could not update YurtAppDaemon %s/%s status: %s", yad.Namespace, yad.Name, updateErr) return nil, updateErr } @@ -306,7 +306,7 @@ func (r *ReconcileYurtAppDaemon) manageWorkloads(instance *unitv1alpha1.YurtAppD provision, err := r.manageWorkloadsProvision(instance, allNameToNodePools, expectedRevision, templateType, needDeleted, needCreate) if err != nil { SetYurtAppDaemonCondition(newStatus, NewYurtAppDaemonCondition(unitv1alpha1.WorkLoadProvisioned, corev1.ConditionFalse, "Error", err.Error())) - return newStatus, fmt.Errorf("fail to manage workload provision: %v", err) + return newStatus, fmt.Errorf("could not manage workload provision: %v", err) } if provision { @@ -443,7 +443,7 @@ func (r *ReconcileYurtAppDaemon) getNameToNodePools(instance *unitv1alpha1.YurtA nodepools := unitv1alpha1.NodePoolList{} if err := r.Client.List(context.TODO(), &nodepools, &client.ListOptions{LabelSelector: nodepoolSelector}); err != nil { - klog.Errorf("YurtAppDaemon [%s/%s] Fail to get NodePoolList", instance.GetNamespace(), + klog.Errorf("YurtAppDaemon [%s/%s] could not get NodePoolList", instance.GetNamespace(), instance.GetName()) return nil, nil } diff --git 
a/pkg/yurtmanager/controller/yurtappset/adapter/deployment_adapter.go b/pkg/yurtmanager/controller/yurtappset/adapter/deployment_adapter.go index 4d7ec32a7ff..7b6c55f1e0f 100644 --- a/pkg/yurtmanager/controller/yurtappset/adapter/deployment_adapter.go +++ b/pkg/yurtmanager/controller/yurtappset/adapter/deployment_adapter.go @@ -99,7 +99,7 @@ func (a *DeploymentAdapter) ApplyPoolTemplate(yas *alpha1.YurtAppSet, poolName, } } if poolConfig == nil { - return fmt.Errorf("fail to find pool config %s", poolName) + return fmt.Errorf("could not find pool config %s", poolName) } set.Namespace = yas.Namespace diff --git a/pkg/yurtmanager/controller/yurtappset/adapter/statefulset_adapter.go b/pkg/yurtmanager/controller/yurtappset/adapter/statefulset_adapter.go index 555d21be578..7d48cbea2dd 100644 --- a/pkg/yurtmanager/controller/yurtappset/adapter/statefulset_adapter.go +++ b/pkg/yurtmanager/controller/yurtappset/adapter/statefulset_adapter.go @@ -104,7 +104,7 @@ func (a *StatefulSetAdapter) ApplyPoolTemplate(yas *alpha1.YurtAppSet, poolName, } } if poolConfig == nil { - return fmt.Errorf("fail to find pool config %s", poolName) + return fmt.Errorf("could not find pool config %s", poolName) } set.Namespace = yas.Namespace diff --git a/pkg/yurtmanager/controller/yurtappset/pool_control.go b/pkg/yurtmanager/controller/yurtappset/pool_control.go index 6a6ba35d536..d72801d4e2b 100644 --- a/pkg/yurtmanager/controller/yurtappset/pool_control.go +++ b/pkg/yurtmanager/controller/yurtappset/pool_control.go @@ -52,7 +52,7 @@ func (m *PoolControl) GetAllPools(yas *alpha1.YurtAppSet) (pools []*Pool, err er setList := m.adapter.NewResourceListObject() cliSetList, ok := setList.(client.ObjectList) if !ok { - return nil, errors.New("fail to convert runtime object to client.ObjectList") + return nil, errors.New("could not convert runtime object to client.ObjectList") } err = m.Client.List(context.TODO(), cliSetList, &client.ListOptions{LabelSelector: selector}) if err != nil { @@ -94,7 +94,7 @@ func (m *PoolControl) CreatePool(yas *alpha1.YurtAppSet, poolName string, revisi klog.V(4).Infof("Have %d replicas when creating Pool for YurtAppSet %s/%s", replicas, yas.Namespace, yas.Name) cliSet, ok := set.(client.Object) if !ok { - return errors.New("fail to convert runtime.Object to client.Object") + return errors.New("could not convert runtime.Object to client.Object") } return m.Create(context.TODO(), cliSet) } @@ -104,7 +104,7 @@ func (m *PoolControl) UpdatePool(pool *Pool, yas *alpha1.YurtAppSet, revision st set := m.adapter.NewResourceObject() cliSet, ok := set.(client.Object) if !ok { - return errors.New("fail to convert runtime.Object to client.Object") + return errors.New("could not convert runtime.Object to client.Object") } var updateError error for i := 0; i < updateRetries; i++ { @@ -134,7 +134,7 @@ func (m *PoolControl) DeletePool(pool *Pool) error { set := pool.Spec.PoolRef.(runtime.Object) cliSet, ok := set.(client.Object) if !ok { - return errors.New("fail to convert runtime.Object to client.Object") + return errors.New("could not convert runtime.Object to client.Object") } return m.Delete(context.TODO(), cliSet, client.PropagationPolicy(metav1.DeletePropagationBackground)) } diff --git a/pkg/yurtmanager/controller/yurtappset/yurtappset_controller.go b/pkg/yurtmanager/controller/yurtappset/yurtappset_controller.go index fc8e7fe0fa3..32f0d7656ee 100644 --- a/pkg/yurtmanager/controller/yurtappset/yurtappset_controller.go +++ b/pkg/yurtmanager/controller/yurtappset/yurtappset_controller.go @@ -172,7 +172,7 
@@ func (r *ReconcileYurtAppSet) Reconcile(_ context.Context, request reconcile.Req currentRevision, updatedRevision, collisionCount, err := r.constructYurtAppSetRevisions(instance) if err != nil { - klog.Errorf("Fail to construct controller revision of YurtAppSet %s/%s: %s", instance.Namespace, instance.Name, err) + klog.Errorf("could not construct controller revision of YurtAppSet %s/%s: %s", instance.Namespace, instance.Name, err) r.recorder.Event(instance.DeepCopy(), corev1.EventTypeWarning, fmt.Sprintf("Failed%s", eventTypeRevisionProvision), err.Error()) return reconcile.Result{}, err } @@ -187,7 +187,7 @@ func (r *ReconcileYurtAppSet) Reconcile(_ context.Context, request reconcile.Req nameToPool, err := r.getNameToPool(instance, control) if err != nil { - klog.Errorf("Fail to get Pools of YurtAppSet %s/%s: %s", instance.Namespace, instance.Name, err) + klog.Errorf("could not get Pools of YurtAppSet %s/%s: %s", instance.Namespace, instance.Name, err) r.recorder.Event(instance.DeepCopy(), corev1.EventTypeWarning, fmt.Sprintf("Failed %s", eventTypeFindPools), err.Error()) return reconcile.Result{}, nil @@ -202,7 +202,7 @@ func (r *ReconcileYurtAppSet) Reconcile(_ context.Context, request reconcile.Req } newStatus, err := r.managePools(instance, nameToPool, nextPatches, expectedRevision, poolType) if err != nil { - klog.Errorf("Fail to update YurtAppSet %s/%s: %s", instance.Namespace, instance.Name, err) + klog.Errorf("could not update YurtAppSet %s/%s: %s", instance.Namespace, instance.Name, err) r.recorder.Event(instance.DeepCopy(), corev1.EventTypeWarning, fmt.Sprintf("Failed%s", eventTypePoolsUpdate), err.Error()) } @@ -213,7 +213,7 @@ func (r *ReconcileYurtAppSet) getNameToPool(instance *unitv1alpha1.YurtAppSet, c pools, err := control.GetAllPools(instance) if err != nil { r.recorder.Event(instance.DeepCopy(), corev1.EventTypeWarning, fmt.Sprintf("Failed%s", eventTypeFindPools), err.Error()) - return nil, fmt.Errorf("fail to get all Pools for YurtAppSet %s/%s: %s", instance.Namespace, instance.Name, err) + return nil, fmt.Errorf("could not get all Pools for YurtAppSet %s/%s: %s", instance.Namespace, instance.Name, err) } klog.V(4).Infof("Classify YurtAppSet %s/%s by pool name", instance.Namespace, instance.Name) @@ -222,7 +222,7 @@ func (r *ReconcileYurtAppSet) getNameToPool(instance *unitv1alpha1.YurtAppSet, c nameToPool, err := r.deleteDupPool(nameToPools, control) if err != nil { r.recorder.Event(instance.DeepCopy(), corev1.EventTypeWarning, fmt.Sprintf("Failed%s", eventTypeDupPoolsDelete), err.Error()) - return nil, fmt.Errorf("fail to manage duplicate Pool of YurtAppSet %s/%s: %s", instance.Namespace, instance.Name, err) + return nil, fmt.Errorf("could not manage duplicate Pool of YurtAppSet %s/%s: %s", instance.Namespace, instance.Name, err) } return nameToPool, nil @@ -301,7 +301,7 @@ func (r *ReconcileYurtAppSet) calculateStatus(instance *unitv1alpha1.YurtAppSet, var poolFailure *string overriderList := unitv1alpha1.YurtAppOverriderList{} if err := r.List(context.TODO(), &overriderList); err != nil { - message := fmt.Sprintf("fail to list yurtappoverrider: %v", err) + message := fmt.Sprintf("could not list yurtappoverrider: %v", err) poolFailure = &message } for _, overrider := range overriderList.Items { @@ -393,6 +393,6 @@ func (r *ReconcileYurtAppSet) updateYurtAppSet(yas *unitv1alpha1.YurtAppSet, old obj = tmpObj } - klog.Errorf("fail to update YurtAppSet %s/%s status: %s", yas.Namespace, yas.Name, updateErr) + klog.Errorf("could not update YurtAppSet %s/%s status: %s", 
yas.Namespace, yas.Name, updateErr) return nil, updateErr } diff --git a/pkg/yurtmanager/controller/yurtappset/yurtappset_controller_utils.go b/pkg/yurtmanager/controller/yurtappset/yurtappset_controller_utils.go index 3c7d22a560c..af3c4e90f40 100644 --- a/pkg/yurtmanager/controller/yurtappset/yurtappset_controller_utils.go +++ b/pkg/yurtmanager/controller/yurtappset/yurtappset_controller_utils.go @@ -42,11 +42,11 @@ type YurtAppSetPatches struct { func getPoolNameFrom(metaObj metav1.Object) (string, error) { name, exist := metaObj.GetLabels()[apps.PoolNameLabelKey] if !exist { - return "", fmt.Errorf("fail to get pool name from label of pool %s/%s: no label %s found", metaObj.GetNamespace(), metaObj.GetName(), apps.PoolNameLabelKey) + return "", fmt.Errorf("could not get pool name from label of pool %s/%s: no label %s found", metaObj.GetNamespace(), metaObj.GetName(), apps.PoolNameLabelKey) } if len(name) == 0 { - return "", fmt.Errorf("fail to get pool name from label of pool %s/%s: label %s has an empty value", metaObj.GetNamespace(), metaObj.GetName(), apps.PoolNameLabelKey) + return "", fmt.Errorf("could not get pool name from label of pool %s/%s: label %s has an empty value", metaObj.GetNamespace(), metaObj.GetName(), apps.PoolNameLabelKey) } return name, nil diff --git a/pkg/yurtmanager/controller/yurtappset/yurtappset_update.go b/pkg/yurtmanager/controller/yurtappset/yurtappset_update.go index 6a73afd915b..509c8bd654c 100644 --- a/pkg/yurtmanager/controller/yurtappset/yurtappset_update.go +++ b/pkg/yurtmanager/controller/yurtappset/yurtappset_update.go @@ -44,7 +44,7 @@ func (r *ReconcileYurtAppSet) managePools(yas *unitv1alpha1.YurtAppSet, exists, provisioned, err := r.managePoolProvision(yas, nameToPool, nextPatches, expectedRevision, poolType) if err != nil { SetYurtAppSetCondition(newStatus, NewYurtAppSetCondition(unitv1alpha1.PoolProvisioned, corev1.ConditionFalse, "Error", err.Error())) - return newStatus, fmt.Errorf("fail to manage Pool provision: %s", err) + return newStatus, fmt.Errorf("could not manage Pool provision: %s", err) } if provisioned { @@ -140,7 +140,7 @@ func (r *ReconcileYurtAppSet) managePoolProvision(yas *unitv1alpha1.YurtAppSet, err := r.poolControls[workloadType].CreatePool(yas, poolName, revision, replicas) if err != nil { if !errors.IsTimeout(err) { - return fmt.Errorf("fail to create Pool (%s) %s: %s", workloadType, poolName, err.Error()) + return fmt.Errorf("could not create Pool (%s) %s: %s", workloadType, poolName, err.Error()) } } @@ -160,7 +160,7 @@ func (r *ReconcileYurtAppSet) managePoolProvision(yas *unitv1alpha1.YurtAppSet, for _, poolName := range deletes { pool := nameToPool[poolName] if err := r.poolControls[workloadType].DeletePool(pool); err != nil { - deleteErrs = append(deleteErrs, fmt.Errorf("fail to delete Pool (%s) %s/%s for %s: %s", workloadType, pool.Namespace, pool.Name, poolName, err)) + deleteErrs = append(deleteErrs, fmt.Errorf("could not delete Pool (%s) %s/%s for %s: %s", workloadType, pool.Namespace, pool.Name, poolName, err)) } } @@ -181,14 +181,14 @@ func (r *ReconcileYurtAppSet) managePoolProvision(yas *unitv1alpha1.YurtAppSet, pools, err := control.GetAllPools(yas) if err != nil { - errs = append(errs, fmt.Errorf("fail to list Pool of other type %s for YurtAppSet %s/%s: %s", t, yas.Namespace, yas.Name, err)) + errs = append(errs, fmt.Errorf("could not list Pool of other type %s for YurtAppSet %s/%s: %s", t, yas.Namespace, yas.Name, err)) continue } for _, pool := range pools { cleaned = true if err := 
control.DeletePool(pool); err != nil { - errs = append(errs, fmt.Errorf("fail to delete Pool %s of other type %s for YurtAppSet %s/%s: %s", pool.Name, t, yas.Namespace, yas.Name, err)) + errs = append(errs, fmt.Errorf("could not delete Pool %s of other type %s for YurtAppSet %s/%s: %s", pool.Name, t, yas.Namespace, yas.Name, err)) continue } } diff --git a/pkg/yurtmanager/controller/yurtcoordinator/cert/certificate.go b/pkg/yurtmanager/controller/yurtcoordinator/cert/certificate.go index 7c5bf2d68b9..dd44020932f 100644 --- a/pkg/yurtmanager/controller/yurtcoordinator/cert/certificate.go +++ b/pkg/yurtmanager/controller/yurtcoordinator/cert/certificate.go @@ -368,22 +368,22 @@ func WriteCertAndKeyIntoSecret(clientSet client.Interface, certName, secretName if key != nil { keyPEM, err := keyutil.MarshalPrivateKeyToPEM(key) if err != nil { - return errors.Wrapf(err, "fail to write %s.key into secret %s", certName, secretName) + return errors.Wrapf(err, "could not write %s.key into secret %s", certName, secretName) } err = secretClient.AddData(fmt.Sprintf("%s.key", certName), keyPEM) if err != nil { - return errors.Wrapf(err, "fail to write %s.key into secret %s", certName, secretName) + return errors.Wrapf(err, "could not write %s.key into secret %s", certName, secretName) } } if cert != nil { certPEM, err := EncodeCertPEM(cert) if err != nil { - return errors.Wrapf(err, "fail to write %s.cert into secret %s", certName, secretName) + return errors.Wrapf(err, "could not write %s.cert into secret %s", certName, secretName) } err = secretClient.AddData(fmt.Sprintf("%s.crt", certName), certPEM) if err != nil { - return errors.Wrapf(err, "fail to write %s.cert into secret %s", certName, secretName) + return errors.Wrapf(err, "could not write %s.cert into secret %s", certName, secretName) } } @@ -415,21 +415,21 @@ func WriteKeyPairIntoSecret(clientSet client.Interface, secretName, keyName stri privateKeyPEM, err := keyutil.MarshalPrivateKeyToPEM(key) if err != nil { - return errors.Wrapf(err, "fail to marshal private key into PEM format %s", keyName) + return errors.Wrapf(err, "could not marshal private key into PEM format %s", keyName) } err = secretClient.AddData(fmt.Sprintf("%s.key", keyName), privateKeyPEM) if err != nil { - return errors.Wrapf(err, "fail to write %s.key into secret %s", keyName, secretName) + return errors.Wrapf(err, "could not write %s.key into secret %s", keyName, secretName) } publicKey := key.Public() publicKeyPEM, err := EncodePublicKeyPEM(publicKey) if err != nil { - return errors.Wrapf(err, "fail to marshal public key into PEM format %s", keyName) + return errors.Wrapf(err, "could not marshal public key into PEM format %s", keyName) } err = secretClient.AddData(fmt.Sprintf("%s.pub", keyName), publicKeyPEM) if err != nil { - return errors.Wrapf(err, "fail to write %s.pub into secret %s", keyName, secretName) + return errors.Wrapf(err, "could not write %s.pub into secret %s", keyName, secretName) } klog.Infof(Format("successfully write key pair into secret %s", secretName)) diff --git a/pkg/yurtmanager/controller/yurtcoordinator/cert/secret.go b/pkg/yurtmanager/controller/yurtcoordinator/cert/secret.go index 7ce185c5258..94db3d93099 100644 --- a/pkg/yurtmanager/controller/yurtcoordinator/cert/secret.go +++ b/pkg/yurtmanager/controller/yurtcoordinator/cert/secret.go @@ -84,7 +84,7 @@ func (c *SecretClient) AddData(key string, val []byte) error { func (c *SecretClient) GetData(key string) ([]byte, error) { secret, err := 
c.client.CoreV1().Secrets(c.Namespace).Get(context.TODO(), c.Name, metav1.GetOptions{}) if err != nil { - return nil, errors.Wrap(err, "fail to get secret from secretClient") + return nil, errors.Wrap(err, "could not get secret from secretClient") } val, ok := secret.Data[key] diff --git a/pkg/yurtmanager/controller/yurtcoordinator/cert/yurtcoordinatorcert_controller.go b/pkg/yurtmanager/controller/yurtcoordinator/cert/yurtcoordinatorcert_controller.go index a560b3514c0..b29d0bcd74c 100644 --- a/pkg/yurtmanager/controller/yurtcoordinator/cert/yurtcoordinatorcert_controller.go +++ b/pkg/yurtmanager/controller/yurtcoordinator/cert/yurtcoordinatorcert_controller.go @@ -293,7 +293,7 @@ type ReconcileYurtCoordinatorCert struct { func (r *ReconcileYurtCoordinatorCert) InjectConfig(cfg *rest.Config) error { kubeClient, err := client.NewForConfig(cfg) if err != nil { - klog.Errorf("failed to create kube client, %v", err) + klog.Errorf("could not create kube client, %v", err) return err } r.kubeClient = kubeClient @@ -366,7 +366,7 @@ func (r *ReconcileYurtCoordinatorCert) initYurtCoordinator(allSelfSignedCerts [] ips, _, err := certConf.certInit(r.kubeClient, stopCh) if err != nil { // if cert init failed, skip this cert - klog.Errorf(Format("fail to init cert %s when checking dynamic attrs: %v", certConf.CertName, err)) + klog.Errorf(Format("could not init cert %s when checking dynamic attrs: %v", certConf.CertName, err)) continue } else { // check if dynamic IP addresses already exist in cert @@ -417,12 +417,12 @@ func initCA(clientSet client.Interface) (caCert *x509.Certificate, caKey crypto. // write it into the secret caCert, caKey, err = NewSelfSignedCA() if err != nil { - return nil, nil, false, errors.Wrap(err, "fail to new self CA assets when initializing yurtcoordinator") + return nil, nil, false, errors.Wrap(err, "could not create self-signed CA assets when initializing yurtcoordinator") } err = WriteCertAndKeyIntoSecret(clientSet, "ca", YurtCoordinatorCASecretName, caCert, caKey) if err != nil { - return nil, nil, false, errors.Wrap(err, "fail to write CA assets into secret when initializing yurtcoordinator") + return nil, nil, false, errors.Wrap(err, "could not write CA assets into secret when initializing yurtcoordinator") } } @@ -438,7 +438,7 @@ func initAPIServerClientCert(clientSet client.Interface, stopCh <-chan struct{}) klog.Infof("apiserver-kubelet-client cert has already existed in secret %s", YurtCoordinatorStaticSecretName) return nil } else if err != nil { - klog.Errorf("fail to get apiserver-kubelet-client cert in secret(%s), %v, and new cert will be created", YurtCoordinatorStaticSecretName, err) + klog.Errorf("could not get apiserver-kubelet-client cert in secret(%s), %v, and new cert will be created", YurtCoordinatorStaticSecretName, err) } certMgr, err := certfactory.NewCertManagerFactory(clientSet).New(&certfactory.CertManagerConfig{ @@ -465,7 +465,7 @@ func initNodeLeaseProxyClient(clientSet client.Interface, stopCh <-chan struct{} klog.Infof("node-lease-proxy-client cert has already existed in secret %s", YurtCoordinatorYurthubClientSecretName) return nil } else if err != nil { - klog.Errorf("fail to get node-lease-proxy-client cert in secret(%s), %v, and new cert will be created", YurtCoordinatorYurthubClientSecretName, err) + klog.Errorf("could not get node-lease-proxy-client cert in secret(%s), %v, and new cert will be created", YurtCoordinatorYurthubClientSecretName, err) } certMgr, err := certfactory.NewCertManagerFactory(clientSet).New(&certfactory.CertManagerConfig{ @@
-487,7 +487,7 @@ func initNodeLeaseProxyClient(clientSet client.Interface, stopCh <-chan struct{} func initSAKeyPair(clientSet client.Interface, keyName, secretName string) (err error) { key, err := NewPrivateKey() if err != nil { - return errors.Wrap(err, "fail to create sa key pair") + return errors.Wrap(err, "could not create sa key pair") } return WriteKeyPairIntoSecret(clientSet, secretName, keyName, key) diff --git a/pkg/yurtmanager/controller/yurtcoordinator/delegatelease/delegatelease_controller.go b/pkg/yurtmanager/controller/yurtcoordinator/delegatelease/delegatelease_controller.go index d994ef63b4c..dddff7bd268 100644 --- a/pkg/yurtmanager/controller/yurtcoordinator/delegatelease/delegatelease_controller.go +++ b/pkg/yurtmanager/controller/yurtcoordinator/delegatelease/delegatelease_controller.go @@ -214,7 +214,7 @@ func (r *ReconcileDelegateLease) InjectClient(c client.Client) error { func (r *ReconcileDelegateLease) InjectConfig(cfg *rest.Config) error { client, err := kubernetes.NewForConfig(cfg) if err != nil { - klog.Errorf("failed to create kube client, %v", err) + klog.Errorf("could not create kube client, %v", err) return err } r.dlClient = client diff --git a/pkg/yurtmanager/controller/yurtcoordinator/podbinding/podbinding_controller.go b/pkg/yurtmanager/controller/yurtcoordinator/podbinding/podbinding_controller.go index 2b13609c3c7..7527e85695a 100644 --- a/pkg/yurtmanager/controller/yurtcoordinator/podbinding/podbinding_controller.go +++ b/pkg/yurtmanager/controller/yurtcoordinator/podbinding/podbinding_controller.go @@ -105,7 +105,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { // return []string{} //}) //if err != nil { - // klog.Errorf(Format("failed to register field indexers for podbinding controller, %v", err)) + // klog.Errorf(Format("could not register field indexers for podbinding controller, %v", err)) //} //return err } @@ -155,12 +155,12 @@ func (r *ReconcilePodBinding) processNode(node *corev1.Node) error { // pod binding takes precedence against node autonomy if nodeutil.IsPodBoundenToNode(node) { if err := r.configureTolerationForPod(pod, nil); err != nil { - klog.Errorf(Format("failed to configure toleration of pod, %v", err)) + klog.Errorf(Format("could not configure toleration of pod, %v", err)) } } else { tolerationSeconds := int64(defaultTolerationSeconds) if err := r.configureTolerationForPod(pod, &tolerationSeconds); err != nil { - klog.Errorf(Format("failed to configure toleration of pod, %v", err)) + klog.Errorf(Format("could not configure toleration of pod, %v", err)) } } } @@ -177,7 +177,7 @@ func (r *ReconcilePodBinding) getPodsAssignedToNode(name string) ([]corev1.Pod, podList := &corev1.PodList{} err := r.List(context.TODO(), podList, listOptions) if err != nil { - klog.Errorf(Format("failed to get podList for node(%s), %v", name, err)) + klog.Errorf(Format("could not get podList for node(%s), %v", name, err)) return nil, err } return podList.Items, nil @@ -198,7 +198,7 @@ func (r *ReconcilePodBinding) configureTolerationForPod(pod *corev1.Pod, tolerat } err := r.Update(context.TODO(), pod, &client.UpdateOptions{}) if err != nil { - klog.Errorf(Format("failed to update toleration of pod(%s/%s), %v", pod.Namespace, pod.Name, err)) + klog.Errorf(Format("could not update toleration of pod(%s/%s), %v", pod.Namespace, pod.Name, err)) return err } } diff --git a/pkg/yurtmanager/controller/yurtstaticset/yurtstaticset_controller.go b/pkg/yurtmanager/controller/yurtstaticset/yurtstaticset_controller.go index 
232762cce99..707c64644df 100644 --- a/pkg/yurtmanager/controller/yurtstaticset/yurtstaticset_controller.go +++ b/pkg/yurtmanager/controller/yurtstaticset/yurtstaticset_controller.go @@ -287,7 +287,7 @@ func (r *ReconcileYurtStaticSet) Reconcile(_ context.Context, request reconcile. if kerr.IsNotFound(err) { return reconcile.Result{}, r.deleteConfigMap(request.Name, request.Namespace) } - klog.Errorf("Fail to get YurtStaticSet %v, %v", request.NamespacedName, err) + klog.Errorf("could not get YurtStaticSet %v, %v", request.NamespacedName, err) return ctrl.Result{}, client.IgnoreNotFound(err) } @@ -325,13 +325,13 @@ func (r *ReconcileYurtStaticSet) Reconcile(_ context.Context, request reconcile. // The above hash value will be added to the annotation latestManifest, err := util.GenStaticPodManifest(&instance.Spec.Template, latestHash) if err != nil { - klog.Errorf(Format("Fail to generate static pod manifest of YurtStaticSet %v, %v", request.NamespacedName, err)) + klog.Errorf(Format("could not generate static pod manifest of YurtStaticSet %v, %v", request.NamespacedName, err)) return ctrl.Result{}, err } // Sync the corresponding configmap to the latest state if err := r.syncConfigMap(instance, latestHash, latestManifest); err != nil { - klog.Errorf(Format("Fail to sync the corresponding configmap of YurtStaticSet %v, %v", request.NamespacedName, err)) + klog.Errorf(Format("could not sync the corresponding configmap of YurtStaticSet %v, %v", request.NamespacedName, err)) return ctrl.Result{}, err } @@ -339,13 +339,13 @@ func (r *ReconcileYurtStaticSet) Reconcile(_ context.Context, request reconcile. upgradeInfos, err := upgradeinfo.New(r.Client, instance, UpgradeWorkerPodPrefix, latestHash) if err != nil { // The worker pod is failed, then some irreparable failure has occurred. Just stop reconcile and update status - if strings.Contains(err.Error(), "fail to init worker pod") { + if strings.Contains(err.Error(), "could not init worker pod") { r.recorder.Eventf(instance, corev1.EventTypeWarning, "YurtStaticSet Upgrade Failed", err.Error()) klog.Errorf(err.Error()) return reconcile.Result{}, err } - klog.Errorf(Format("Fail to get static pod and worker pod upgrade info for nodes of YurtStaticSet %v, %v", + klog.Errorf(Format("could not get static pod and worker pod upgrade info for nodes of YurtStaticSet %v, %v", request.NamespacedName, err)) return ctrl.Result{}, err } @@ -361,7 +361,7 @@ func (r *ReconcileYurtStaticSet) Reconcile(_ context.Context, request reconcile. // Clean up unused pods if err := r.removeUnusedPods(deletePods); err != nil { - klog.Errorf(Format("Fail to remove unused pods of YurtStaticSet %v, %v", request.NamespacedName, err)) + klog.Errorf(Format("could not remove unused pods of YurtStaticSet %v, %v", request.NamespacedName, err)) return reconcile.Result{}, err } @@ -382,7 +382,7 @@ func (r *ReconcileYurtStaticSet) Reconcile(_ context.Context, request reconcile. } if err := r.advancedRollingUpdate(instance, upgradeInfos, latestHash); err != nil { - klog.Errorf(Format("Fail to AdvancedRollingUpdate upgrade of YurtStaticSet %v, %v", request.NamespacedName, err)) + klog.Errorf(Format("could not AdvancedRollingUpdate upgrade of YurtStaticSet %v, %v", request.NamespacedName, err)) return ctrl.Result{}, err } return r.updateYurtStaticSetStatus(instance, totalNumber, readyNumber, upgradedNumber) @@ -391,7 +391,7 @@ func (r *ReconcileYurtStaticSet) Reconcile(_ context.Context, request reconcile. 
// It will set PodNeedUpgrade condition and work with YurtHub component case appsv1alpha1.OTAUpgradeStrategyType: if err := r.otaUpgrade(upgradeInfos); err != nil { - klog.Errorf(Format("Fail to OTA upgrade of YurtStaticSet %v, %v", request.NamespacedName, err)) + klog.Errorf(Format("could not OTA upgrade of YurtStaticSet %v, %v", request.NamespacedName, err)) return ctrl.Result{}, err } return r.updateYurtStaticSetStatus(instance, totalNumber, readyNumber, upgradedNumber) diff --git a/pkg/yurtmanager/webhook/deploymentrender/v1alpha1/deploymentrender_default.go b/pkg/yurtmanager/webhook/deploymentrender/v1alpha1/deploymentrender_default.go index e1433cf75ce..035af486496 100644 --- a/pkg/yurtmanager/webhook/deploymentrender/v1alpha1/deploymentrender_default.go +++ b/pkg/yurtmanager/webhook/deploymentrender/v1alpha1/deploymentrender_default.go @@ -145,7 +145,7 @@ func (webhook *DeploymentRenderHandler) Default(ctx context.Context, obj runtime dataStruct: dataStruct, } if err := pc.jsonMergePatch(); err != nil { - klog.Infof("fail to update patches for deployment: %v", err) + klog.Infof("could not update patches for deployment: %v", err) return err } break diff --git a/pkg/yurtmanager/webhook/deploymentrender/v1alpha1/deploymentrender_webhook_test.go b/pkg/yurtmanager/webhook/deploymentrender/v1alpha1/deploymentrender_webhook_test.go index 1bd80a09cd8..d64caf88a65 100644 --- a/pkg/yurtmanager/webhook/deploymentrender/v1alpha1/deploymentrender_webhook_test.go +++ b/pkg/yurtmanager/webhook/deploymentrender/v1alpha1/deploymentrender_webhook_test.go @@ -302,11 +302,11 @@ func TestDeploymentRenderHandler_Default(t *testing.T) { } scheme := runtime.NewScheme() if err := v1alpha1.AddToScheme(scheme); err != nil { - t.Logf("failed to add yurt custom resource") + t.Logf("could not add yurt custom resource") return } if err := clientgoscheme.AddToScheme(scheme); err != nil { - t.Logf("failed to add kubernetes clint-go custom resource") + t.Logf("could not add kubernetes client-go custom resource") return } for _, tcase := range tcases { diff --git a/pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_validation.go b/pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_validation.go index 5c14370f339..0891f671c40 100644 --- a/pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_validation.go +++ b/pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_validation.go @@ -140,7 +140,7 @@ func validateNodePoolDeletion(cli client.Client, np *appsv1beta1.NodePool) field if err := cli.List(context.TODO(), &nodes, client.MatchingLabels(map[string]string{apps.NodePoolLabel: np.Name})); err != nil { return field.ErrorList([]*field.Error{ field.Forbidden(field.NewPath("metadata").Child("name"), - "fail to get nodes associated to the pool")}) + "could not get nodes associated to the pool")}) } if len(nodes.Items) != 0 { return field.ErrorList([]*field.Error{ diff --git a/pkg/yurtmanager/webhook/server.go b/pkg/yurtmanager/webhook/server.go index c92b7cd9dea..f2777b0f671 100644 --- a/pkg/yurtmanager/webhook/server.go +++ b/pkg/yurtmanager/webhook/server.go @@ -158,6 +158,6 @@ func Initialize(ctx context.Context, cc *config.CompletedConfig, restCfg *rest.C case <-webhookcontroller.Inited(): return nil case <-timer.C: - return fmt.Errorf("failed to prepare certificate for webhook within 20s") + return fmt.Errorf("could not prepare certificate for webhook within 20s") } } diff --git a/pkg/yurtmanager/webhook/util/configuration/configuration.go b/pkg/yurtmanager/webhook/util/configuration/configuration.go index
9332939adfc..6c8e629ca91 100644 --- a/pkg/yurtmanager/webhook/util/configuration/configuration.go +++ b/pkg/yurtmanager/webhook/util/configuration/configuration.go @@ -104,13 +104,13 @@ func Ensure(kubeClient clientset.Interface, handlers map[string]struct{}, caBund if !reflect.DeepEqual(mutatingConfig, oldMutatingConfig) { if _, err := kubeClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Update(context.TODO(), mutatingConfig, metav1.UpdateOptions{}); err != nil { - return fmt.Errorf("failed to update %s: %v", webhookutil.MutatingWebhookConfigurationName, err) + return fmt.Errorf("could not update %s: %v", webhookutil.MutatingWebhookConfigurationName, err) } } if !reflect.DeepEqual(validatingConfig, oldValidatingConfig) { if _, err := kubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Update(context.TODO(), validatingConfig, metav1.UpdateOptions{}); err != nil { - return fmt.Errorf("failed to update %s: %v", webhookutil.ValidatingWebhookConfigurationName, err) + return fmt.Errorf("could not update %s: %v", webhookutil.ValidatingWebhookConfigurationName, err) } } diff --git a/pkg/yurtmanager/webhook/util/controller/webhook_controller.go b/pkg/yurtmanager/webhook/util/controller/webhook_controller.go index dad23492209..2e3593c5b28 100644 --- a/pkg/yurtmanager/webhook/util/controller/webhook_controller.go +++ b/pkg/yurtmanager/webhook/util/controller/webhook_controller.go @@ -169,7 +169,7 @@ func (c *Controller) Start(ctx context.Context) { c.informerFactory.Start(ctx.Done()) c.extensionsInformerFactory.Start(ctx.Done()) if !cache.WaitForNamedCacheSync("webhook-controller", ctx.Done(), c.synced...) { - klog.Errorf("Wait For Cache sync webhook-controller faild") + klog.Errorf("Wait For Cache sync webhook-controller failed") return } @@ -228,30 +228,30 @@ func (c *Controller) sync(key string) error { }) } if err != nil { - return fmt.Errorf("failed to ensure certs: %v", err) + return fmt.Errorf("could not ensure certs: %v", err) } certs, _, err := certWriter.EnsureCert(dnsName) if err != nil { - return fmt.Errorf("failed to ensure certs: %v", err) + return fmt.Errorf("could not ensure certs: %v", err) } if err := writer.WriteCertsToDir(webhookutil.GetCertDir(), certs); err != nil { - return fmt.Errorf("failed to write certs to dir: %v", err) + return fmt.Errorf("could not write certs to dir: %v", err) } if err := configuration.Ensure(c.kubeClient, c.handlers, certs.CACert, c.webhookPort); err != nil { - return fmt.Errorf("failed to ensure configuration: %v", err) + return fmt.Errorf("could not ensure configuration: %v", err) } if len(key) != 0 { crd, err := c.extensionsLister.Get(key) if err != nil { - klog.Errorf("failed to get crd(%s), %v", key, err) + klog.Errorf("could not get crd(%s), %v", key, err) return err } if err := ensureCRDConversionCA(c.extensionsClient, crd, certs.CACert); err != nil { - klog.Errorf("failed to ensure conversion configuration for crd(%s), %v", crd.Name, err) + klog.Errorf("could not ensure conversion configuration for crd(%s), %v", crd.Name, err) return err } } diff --git a/pkg/yurtmanager/webhook/util/generator/selfsigned.go b/pkg/yurtmanager/webhook/util/generator/selfsigned.go index 5a203ab4954..c534686c053 100644 --- a/pkg/yurtmanager/webhook/util/generator/selfsigned.go +++ b/pkg/yurtmanager/webhook/util/generator/selfsigned.go @@ -70,11 +70,11 @@ func (cp *SelfSignedCertGenerator) Generate(commonName string) (*Artifacts, erro if !valid { signingKey, err = NewPrivateKey() if err != nil { - return nil, fmt.Errorf("failed to 
create the CA private key: %v", err) + return nil, fmt.Errorf("could not create the CA private key: %v", err) } signingCert, err = cert.NewSelfSignedCACert(cert.Config{CommonName: "webhook-cert-ca"}, signingKey) if err != nil { - return nil, fmt.Errorf("failed to create the CA cert: %v", err) + return nil, fmt.Errorf("could not create the CA cert: %v", err) } } @@ -89,7 +89,7 @@ func (cp *SelfSignedCertGenerator) Generate(commonName string) (*Artifacts, erro key, err := NewPrivateKey() if err != nil { - return nil, fmt.Errorf("failed to create the private key: %v", err) + return nil, fmt.Errorf("could not create the private key: %v", err) } signedCert, err := NewSignedCert( cert.Config{ @@ -100,7 +100,7 @@ func (cp *SelfSignedCertGenerator) Generate(commonName string) (*Artifacts, erro key, signingCert, signingKey, ) if err != nil { - return nil, fmt.Errorf("failed to create the cert: %v", err) + return nil, fmt.Errorf("could not create the cert: %v", err) } return &Artifacts{ Key: EncodePrivateKeyPEM(key), diff --git a/pkg/yurtmanager/webhook/util/util.go b/pkg/yurtmanager/webhook/util/util.go index c45c4183d42..ea3c659f8a7 100644 --- a/pkg/yurtmanager/webhook/util/util.go +++ b/pkg/yurtmanager/webhook/util/util.go @@ -65,7 +65,7 @@ func GetWebHookPort() int { if p, err := strconv.ParseInt(p, 10, 32); err == nil { port = int(p) } else { - klog.Fatalf("failed to convert WEBHOOK_PORT=%v in env: %v", p, err) + klog.Fatalf("could not convert WEBHOOK_PORT=%v in env: %v", p, err) } } return port diff --git a/pkg/yurttunnel/handlerwrapper/localhostproxy/handler.go b/pkg/yurttunnel/handlerwrapper/localhostproxy/handler.go index 7897c0b87a3..f91959e5965 100644 --- a/pkg/yurttunnel/handlerwrapper/localhostproxy/handler.go +++ b/pkg/yurttunnel/handlerwrapper/localhostproxy/handler.go @@ -66,7 +66,7 @@ func (plm *localHostProxyMiddleware) Name() string { func (plm *localHostProxyMiddleware) WrapHandler(handler http.Handler) http.Handler { // wait for nodes and configmaps have synced if !cache.WaitForCacheSync(wait.NeverStop, plm.nodeInformerSynced, plm.cmInformerSynced) { - klog.Error("failed to sync node or configmap cache") + klog.Error("could not sync node or configmap cache") return handler } @@ -123,7 +123,7 @@ func (plm *localHostProxyMiddleware) SetSharedInformerFactory(factory informers. 
nodeInformer := factory.Core().V1().Nodes() if err := nodeInformer.Informer().AddIndexers(cache.Indexers{constants.NodeIPKeyIndex: getNodeAddress}); err != nil { - klog.ErrorS(err, "failed to add statusInternalIP indexer") + klog.ErrorS(err, "could not add statusInternalIP indexer") return err } @@ -220,7 +220,7 @@ func (plm *localHostProxyMiddleware) resolveNodeNameByNodeIP(nodeIP string) (str var nodeName string if nodes, err := plm.getNodesByIP(nodeIP); err != nil || len(nodes) == 0 { - klog.Warningf("failed to get node for node ip(%s)", nodeIP) + klog.Warningf("could not get node for node ip(%s)", nodeIP) return "", fmt.Errorf("proxy node ip(%s) is not exist in cluster", nodeIP) } else if len(nodes) != 1 { klog.Warningf("more than one node with the same IP(%s), so unable to proxy request", nodeIP) @@ -231,7 +231,7 @@ func (plm *localHostProxyMiddleware) resolveNodeNameByNodeIP(nodeIP string) (str if len(nodeName) == 0 { klog.Warningf("node name for node ip(%s) is not exist in cluster", nodeIP) - return "", fmt.Errorf("failed to get node name for node ip(%s)", nodeIP) + return "", fmt.Errorf("could not get node name for node ip(%s)", nodeIP) } klog.V(5).Infof("resolved node name(%s) for node ip(%s)", nodeName, nodeIP) diff --git a/pkg/yurttunnel/handlerwrapper/tracerequest/tracereq.go b/pkg/yurttunnel/handlerwrapper/tracerequest/tracereq.go index 0465aea6b24..e7e2921fba7 100644 --- a/pkg/yurttunnel/handlerwrapper/tracerequest/tracereq.go +++ b/pkg/yurttunnel/handlerwrapper/tracerequest/tracereq.go @@ -74,7 +74,7 @@ func (trm *traceReqMiddleware) SetSharedInformerFactory(factory informers.Shared func (trm *traceReqMiddleware) WrapHandler(handler http.Handler) http.Handler { if !cache.WaitForCacheSync(wait.NeverStop, trm.informersSynced...) { - klog.Error("failed to sync node cache for trace request middleware") + klog.Error("could not sync node cache for trace request middleware") return handler } klog.Infof("%d informer synced in traceReqMiddleware", len(trm.informersSynced)) @@ -94,7 +94,7 @@ func (trm *traceReqMiddleware) WrapHandler(handler http.Handler) http.Handler { // detail info link: https://github.com/kubernetes/enhancements/issues/1558 if completed, err := trm.handleRequestsFromKAS(req); completed || err != nil { if err != nil { - klog.Errorf("failed to handle requests from kube-apiserver, but continue go ahead. %v", err) + klog.Errorf("could not handle requests from kube-apiserver, but continue go ahead. %v", err) } } else { host, port, err := net.SplitHostPort(req.Host) @@ -109,7 +109,7 @@ func (trm *traceReqMiddleware) WrapHandler(handler http.Handler) http.Handler { // 1. transform hostname to nodeIP for request in order to send request to nodeIP address at tunnel-agent // 2. put hostname into X-Tunnel-Proxy-Host request header in order to select the correct backend agent. 
if err := trm.modifyRequest(req, host, port); err != nil { - klog.Errorf("failed to modify request, %v", err) + klog.Errorf("could not modify request, %v", err) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -146,14 +146,14 @@ func (trm *traceReqMiddleware) WrapHandler(handler http.Handler) http.Handler { func (trm *traceReqMiddleware) modifyRequest(req *http.Request, host, port string) error { node, err := trm.nodeLister.Get(host) if err != nil { - klog.Errorf("failed to get node(%s), %v", host, err) + klog.Errorf("could not get node(%s), %v", host, err) return err } nodeIP := getNodeIP(node) if nodeIP == "" { - klog.Errorf("failed to get node(%s) ip", host) - return errors.New("failed to get node ip") + klog.Errorf("could not get node(%s) ip", host) + return errors.New("could not get node ip") } // transform hostname to node ip in request diff --git a/pkg/yurttunnel/kubernetes/kubernetes.go b/pkg/yurttunnel/kubernetes/kubernetes.go index 0e63b7285cb..de0036278ca 100644 --- a/pkg/yurttunnel/kubernetes/kubernetes.go +++ b/pkg/yurttunnel/kubernetes/kubernetes.go @@ -56,7 +56,7 @@ func CreateClientSetKubeConfig(kubeConfig string) (*kubernetes.Clientset, error) } cfg, err = clientcmd.BuildConfigFromFlags("", kubeConfig) if err != nil { - return nil, fmt.Errorf("fail to create the clientset based on %s: %w", + return nil, fmt.Errorf("could not create the clientset based on %s: %w", kubeConfig, err) } cliSet, err := kubernetes.NewForConfig(cfg) diff --git a/pkg/yurttunnel/server/anpserver.go b/pkg/yurttunnel/server/anpserver.go index 2c5bc7c69d1..a17a18af2e5 100644 --- a/pkg/yurttunnel/server/anpserver.go +++ b/pkg/yurttunnel/server/anpserver.go @@ -67,7 +67,7 @@ func (ats *anpTunnelServer) Run() error { ats.interceptorServerUDSFile, ats.tlsCfg) if proxierErr != nil { - return fmt.Errorf("fail to run the proxier: %w", proxierErr) + return fmt.Errorf("could not run the proxier: %w", proxierErr) } wrappedHandler, err := wh.WrapHandler( @@ -75,7 +75,7 @@ func (ats *anpTunnelServer) Run() error { ats.wrappers, ) if err != nil { - return fmt.Errorf("fail to wrap handler: %w", err) + return fmt.Errorf("could not wrap handler: %w", err) } // 2. start the master server @@ -86,13 +86,13 @@ func (ats *anpTunnelServer) Run() error { ats.serverMasterInsecureAddr, ats.tlsCfg) if masterServerErr != nil { - return fmt.Errorf("fail to run master server: %w", masterServerErr) + return fmt.Errorf("could not run master server: %w", masterServerErr) } // 3. 
start the agent server agentServerErr := runAgentServer(ats.tlsCfg, ats.serverAgentAddr, proxyServer) if agentServerErr != nil { - return fmt.Errorf("fail to run agent server: %w", agentServerErr) + return fmt.Errorf("could not run agent server: %w", agentServerErr) } return nil @@ -119,11 +119,11 @@ func runProxier(handler http.Handler, } unixListener, err := net.Listen("unix", udsSockFile) if err != nil { - klog.Errorf("proxier fail to serving request through uds: %s", err) + klog.Errorf("proxier could not serve request through uds: %s", err) } defer unixListener.Close() if err := server.Serve(unixListener); err != nil { - klog.Errorf("proxier fail to serving request through uds: %s", err) + klog.Errorf("proxier could not serve request through uds: %s", err) } }() @@ -149,7 +149,7 @@ func runMasterServer(handler http.Handler, TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), } if err := server.ListenAndServeTLS("", ""); err != nil { - klog.Errorf("failed to serve https request from master: %v", err) + klog.Errorf("could not serve https request from master: %v", err) } }() @@ -162,7 +162,7 @@ func runMasterServer(handler http.Handler, TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), } if err := server.ListenAndServe(); err != nil { - klog.Errorf("failed to serve http request from master: %v", err) + klog.Errorf("could not serve http request from master: %v", err) } }() @@ -194,7 +194,7 @@ func runAgentServer(tlsCfg *tls.Config, listener, err := net.Listen("tcp", agentServerAddr) klog.Info("start handling connection from agents") if err != nil { - return fmt.Errorf("fail to listen to agent on %s: %w", agentServerAddr, err) + return fmt.Errorf("could not listen to agent on %s: %w", agentServerAddr, err) } go grpcServer.Serve(listener) return nil diff --git a/pkg/yurttunnel/server/interceptor.go b/pkg/yurttunnel/server/interceptor.go index 3e4baf4841a..cc0a5a529af 100644 --- a/pkg/yurttunnel/server/interceptor.go +++ b/pkg/yurttunnel/server/interceptor.go @@ -107,7 +107,7 @@ func NewRequestInterceptor(udsSockFile string, cfg *tls.Config) *RequestIntercep tlsTunnelConn := tls.Client(proxyConn, cfg) if err := tlsTunnelConn.Handshake(); err != nil { proxyConn.Close() - return nil, fmt.Errorf("fail to setup TLS handshake through the Tunnel: %w", err) + return nil, fmt.Errorf("could not setup TLS handshake through the Tunnel: %w", err) } klog.V(4).Infof("successfully setup TLS connection to %q with headers: %s", addr, connectHeaders) return tlsTunnelConn, nil @@ -144,7 +144,7 @@ func (ri *RequestInterceptor) ServeHTTP(w http.ResponseWriter, r *http.Request) tunnelConn, err := ri.contextDialer(r.Host, r.Header, r.TLS != nil) if err != nil { klogAndHTTPError(w, http.StatusServiceUnavailable, - "fail to setup the tunnel: %s", err) + "could not setup the tunnel: %s", err) return } defer tunnelConn.Close() @@ -152,7 +152,7 @@ func (ri *RequestInterceptor) ServeHTTP(w http.ResponseWriter, r *http.Request) // 2.
proxy the request to tunnel if err := r.Write(tunnelConn); err != nil { klogAndHTTPError(w, http.StatusServiceUnavailable, - "fail to write request to tls connection: %s", err) + "could not write request to tls connection: %s", err) return } @@ -200,7 +200,7 @@ func serveUpgradeRequest(tunnelConn net.Conn, w http.ResponseWriter, r *http.Req clientConn, _, err := hijacker.Hijack() if err != nil { klogAndHTTPError(w, http.StatusServiceUnavailable, - "fail to hijack response: %s", err) + "could not hijack response: %s", err) return } defer clientConn.Close() @@ -276,7 +276,7 @@ func serveRequest(tunnelConn net.Conn, w http.ResponseWriter, r *http.Request) { defer putBufioReader(br) tunnelHTTPResp, err := http.ReadResponse(br, r) if err != nil { - klogAndHTTPError(w, http.StatusServiceUnavailable, "fail to read response from the tunnel: %v", err) + klogAndHTTPError(w, http.StatusServiceUnavailable, "could not read response from the tunnel: %v", err) return } klog.V(4).Infof("interceptor: successfully read the http response from the proxy tunnel for request %s", r.URL.String()) @@ -317,7 +317,7 @@ func serveRequest(tunnelConn net.Conn, w http.ResponseWriter, r *http.Request) { _, err = io.Copy(writer, tunnelHTTPResp.Body) if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - klog.ErrorS(err, "fail to copy response from the tunnel back to the client") + klog.ErrorS(err, "could not copy response from the tunnel back to the client") } klog.V(4).Infof("interceptor: stop serving request %s with headers: %v", r.URL.String(), r.Header) diff --git a/pkg/yurttunnel/server/serveraddr/addr.go b/pkg/yurttunnel/server/serveraddr/addr.go index 04c97ae9cfa..ff96e34b1cc 100644 --- a/pkg/yurttunnel/server/serveraddr/addr.go +++ b/pkg/yurttunnel/server/serveraddr/addr.go @@ -83,7 +83,7 @@ func GetTunnelServerAddr(clientset kubernetes.Interface) (string, error) { } if tcpPort == 0 { - return "", errors.New("fail to get the port number") + return "", errors.New("could not get the port number") } return net.JoinHostPort(host, strconv.Itoa(int(tcpPort))), nil diff --git a/pkg/yurttunnel/trafficforward/dns/dns.go b/pkg/yurttunnel/trafficforward/dns/dns.go index b9e833e4d89..02d83b51f00 100644 --- a/pkg/yurttunnel/trafficforward/dns/dns.go +++ b/pkg/yurttunnel/trafficforward/dns/dns.go @@ -140,7 +140,7 @@ func (dnsctl *coreDNSRecordController) Run(stopCh <-chan struct{}) { electionChecker := leaderelection.NewLeaderHealthzAdaptor(time.Second * 20) id, err := os.Hostname() if err != nil { - klog.Fatalf("failed to get hostname, %v", err) + klog.Fatalf("could not get hostname, %v", err) } rl, err := resourcelock.New("leases", metav1.NamespaceSystem, dnsControllerName, dnsctl.kubeClient.CoreV1(), @@ -184,7 +184,7 @@ func (dnsctl *coreDNSRecordController) run(stopCh <-chan struct{}) { } if err := dnsctl.ensureCoreDNSRecordConfigMap(); err != nil { - klog.Errorf("failed to ensure dns record ConfigMap %v/%v, %v", + klog.Errorf("could not ensure dns record ConfigMap %v/%v, %v", constants.YurttunnelDNSRecordConfigMapNs, yurttunnelDNSRecordConfigMapName, err) return } @@ -194,14 +194,14 @@ func (dnsctl *coreDNSRecordController) run(stopCh <-chan struct{}) { // sync dns hosts as a whole go wait.Until(func() { if err := dnsctl.syncDNSRecordAsWhole(); err != nil { - klog.Errorf("failed to sync dns record, %v", err) + klog.Errorf("could not sync dns record, %v", err) } }, time.Duration(dnsctl.syncPeriod)*time.Second, stopCh) // sync tunnel server svc go wait.Until(func() { if err := 
dnsctl.syncTunnelServerServiceAsWhole(); err != nil { - klog.Errorf("failed to sync tunnel server service, %v", err) + klog.Errorf("could not sync tunnel server service, %v", err) } }, time.Duration(dnsctl.syncPeriod)*time.Second, stopCh) @@ -291,7 +291,7 @@ func (dnsctl *coreDNSRecordController) ensureCoreDNSRecordConfigMap() error { } _, err = dnsctl.kubeClient.CoreV1().ConfigMaps(constants.YurttunnelServerServiceNs).Create(context.Background(), cm, metav1.CreateOptions{}) if err != nil { - return fmt.Errorf("failed to create ConfigMap %v/%v, %w", + return fmt.Errorf("could not create ConfigMap %v/%v, %w", constants.YurttunnelServerServiceNs, yurttunnelDNSRecordConfigMapName, err) } } @@ -315,13 +315,13 @@ func (dnsctl *coreDNSRecordController) syncDNSRecordAsWhole() error { tunnelServerIP, err := dnsctl.getTunnelServerIP(false) if err != nil { - klog.Errorf("failed to sync dns record as whole, %v", err) + klog.Errorf("could not sync dns record as whole, %v", err) return err } nodes, err := dnsctl.nodeLister.List(labels.Everything()) if err != nil { - klog.Errorf("failed to sync dns record as whole, %v", err) + klog.Errorf("could not sync dns record as whole, %v", err) return err } @@ -331,7 +331,7 @@ func (dnsctl *coreDNSRecordController) syncDNSRecordAsWhole() error { if !isEdgeNode(node) { ip, err = getNodeHostIP(node) if err != nil { - klog.Warningf("failed to parse node address for %v, %v", node.Name, err) + klog.Warningf("could not parse node address for %v, %v", node.Name, err) continue } } @@ -339,7 +339,7 @@ func (dnsctl *coreDNSRecordController) syncDNSRecordAsWhole() error { } if err := dnsctl.updateDNSRecords(records); err != nil { - klog.Errorf("failed to sync dns record as whole, %v", err) + klog.Errorf("could not sync dns record as whole, %v", err) return err } return nil @@ -353,7 +353,7 @@ func (dnsctl *coreDNSRecordController) getTunnelServerIP(useCache bool) (string, svc, err := dnsctl.kubeClient.CoreV1().Services(constants.YurttunnelServerServiceNs). Get(context.Background(), constants.YurttunnelServerInternalServiceName, metav1.GetOptions{}) if err != nil { - return "", fmt.Errorf("failed to get %v/%v service, %w", + return "", fmt.Errorf("could not get %v/%v service, %w", constants.YurttunnelServerServiceNs, constants.YurttunnelServerInternalServiceName, err) } if len(svc.Spec.ClusterIP) == 0 { @@ -381,7 +381,7 @@ func (dnsctl *coreDNSRecordController) updateDNSRecords(records []string) error } cm.Data[constants.YurttunnelDNSRecordNodeDataKey] = strings.Join(records, "\n") if _, err := dnsctl.kubeClient.CoreV1().ConfigMaps(constants.YurttunnelServerServiceNs).Update(context.Background(), cm, metav1.UpdateOptions{}); err != nil { - return fmt.Errorf("failed to update configmap %v/%v, %w", + return fmt.Errorf("could not update configmap %v/%v, %w", constants.YurttunnelServerServiceNs, yurttunnelDNSRecordConfigMapName, err) } return nil @@ -391,7 +391,7 @@ func (dnsctl *coreDNSRecordController) updateTunnelServerSvcDnatPorts(ports []st svc, err := dnsctl.kubeClient.CoreV1().Services(constants.YurttunnelServerServiceNs). 
Get(context.Background(), constants.YurttunnelServerInternalServiceName, metav1.GetOptions{}) if err != nil { - return fmt.Errorf("failed to sync tunnel server internal service, %w", err) + return fmt.Errorf("could not sync tunnel server internal service, %w", err) } changed, updatedSvcPorts := resolveServicePorts(svc, ports, portMappings) @@ -402,7 +402,7 @@ func (dnsctl *coreDNSRecordController) updateTunnelServerSvcDnatPorts(ports []st svc.Spec.Ports = updatedSvcPorts _, err = dnsctl.kubeClient.CoreV1().Services(constants.YurttunnelServerServiceNs).Update(context.Background(), svc, metav1.UpdateOptions{}) if err != nil { - return fmt.Errorf("failed to sync tunnel server service, %w", err) + return fmt.Errorf("could not sync tunnel server service, %w", err) } return nil } @@ -421,24 +421,24 @@ func resolveServicePorts(svc *corev1.Service, ports []string, portMappings map[s for _, dnatPort := range ports { portInt, err := strconv.Atoi(dnatPort) if err != nil { - klog.Errorf("failed to parse dnat port %q, %v", dnatPort, err) + klog.Errorf("could not parse dnat port %q, %v", dnatPort, err) continue } dst, ok := portMappings[dnatPort] if !ok { - klog.Errorf("failed to find proxy destination for port: %s", dnatPort) + klog.Errorf("could not find proxy destination for port: %s", dnatPort) continue } _, targetPort, err := net.SplitHostPort(dst) if err != nil { - klog.Errorf("failed to split target port, %v", err) + klog.Errorf("could not split target port, %v", err) continue } targetPortInt, err := strconv.Atoi(targetPort) if err != nil { - klog.Errorf("failed to parse target port, %v", err) + klog.Errorf("could not parse target port, %v", err) continue } diff --git a/pkg/yurttunnel/trafficforward/dns/util.go b/pkg/yurttunnel/trafficforward/dns/util.go index bd1a1a31517..678e4046e90 100644 --- a/pkg/yurttunnel/trafficforward/dns/util.go +++ b/pkg/yurttunnel/trafficforward/dns/util.go @@ -80,7 +80,7 @@ func removeRecordByHostname(records []string, hostname string) (result []string, func parseHostnameFromDNSRecord(record string) (string, error) { arr := strings.Split(record, "\t") if len(arr) != 2 { - return "", fmt.Errorf("failed to parse hostname, invalid dns record %q", record) + return "", fmt.Errorf("could not parse hostname, invalid dns record %q", record) } return arr[1], nil } diff --git a/pkg/yurttunnel/trafficforward/iptables/iptables.go b/pkg/yurttunnel/trafficforward/iptables/iptables.go index 4103f0d4b2a..36786c110c3 100644 --- a/pkg/yurttunnel/trafficforward/iptables/iptables.go +++ b/pkg/yurttunnel/trafficforward/iptables/iptables.go @@ -188,7 +188,7 @@ func (im *iptablesManager) cleanupIptableSetting() { args := append(iptablesJumpChains[0].extraArgs, "-m", "comment", "--comment", iptablesJumpChains[0].comment, "-j", string(iptablesJumpChains[0].dstChain)) if err := im.iptables.DeleteRule(iptablesJumpChains[0].table, iptablesJumpChains[0].srcChain, args...); err != nil { - klog.Errorf("failed to delete rule that %s chain %s jumps to %s: %v", + klog.Errorf("could not delete rule that %s chain %s jumps to %s: %v", iptablesJumpChains[0].table, iptablesJumpChains[0].srcChain, iptablesJumpChains[0].dstChain, err) } im.deleteJumpChainsWithoutCheck(deletedJumpChains) @@ -215,7 +215,7 @@ func (im *iptablesManager) deleteJumpChains(jumpChains []iptablesJumpChain) erro jump.comment, "-j", string(jump.dstChain)) // delete the jump rule if err := im.iptables.DeleteRule(jump.table, jump.srcChain, args...); err != nil { - klog.Errorf("failed to delete rule that %s chain %s jumps to %s: %v", + 
klog.Errorf("could not delete rule that %s chain %s jumps to %s: %v", jump.table, jump.srcChain, jump.dstChain, err) return err } @@ -243,7 +243,7 @@ func (im *iptablesManager) getIPOfNodesWithoutAgent() []string { var nodesIP []string nodes, err := im.nodeInformer.Lister().List(labels.Everything()) if err != nil { - klog.Errorf("failed to list nodes for iptables: %v", err) + klog.Errorf("could not list nodes for iptables: %v", err) return nodesIP } @@ -309,7 +309,7 @@ func (im *iptablesManager) ensurePortsIptables(currentPorts, deletedPorts, curre }) } if err := im.ensureJumpChains(jumpChains); err != nil { - klog.Errorf("Failed to ensure jump chain, %v", err) + klog.Errorf("could not ensure jump chain, %v", err) return err } @@ -337,7 +337,7 @@ func (im *iptablesManager) ensurePortsIptables(currentPorts, deletedPorts, curre }) } if err := im.deleteJumpChains(deletedJumpChains); err != nil { - klog.Errorf("Failed to delete jump chain, %v", err) + klog.Errorf("could not delete jump chain, %v", err) return err } @@ -419,7 +419,7 @@ func (im *iptablesManager) ensureJumpChains(jumpChains []iptablesJumpChain) erro iptables.Prepend, jump.table, jump.srcChain, args...); err != nil { - klog.Errorf("failed to ensure that %s chain %s jumps to %s: %v", + klog.Errorf("could not ensure that %s chain %s jumps to %s: %v", jump.table, jump.srcChain, jump.dstChain, err) return err } @@ -512,7 +512,7 @@ func (im *iptablesManager) syncIptableSetting() { // check if there are new dnat ports dnatPorts, portMappings, err := util.GetConfiguredProxyPortsAndMappings(im.kubeClient, im.insecureDnatDest, im.secureDnatDest) if err != nil { - klog.Errorf("failed to sync iptables rules, %v", err) + klog.Errorf("could not sync iptables rules, %v", err) return } portsChanged, deletedDnatPorts := getDeletedPorts(im.lastDnatPorts, dnatPorts) @@ -526,7 +526,7 @@ func (im *iptablesManager) syncIptableSetting() { // update the iptables setting if necessary err = im.ensurePortsIptables(currentDnatPorts, deletedDnatPorts, currentNodesIP, deletedNodesIP, portMappings) if err != nil { - klog.Errorf("failed to ensurePortsIptables: %v", err) + klog.Errorf("could not ensurePortsIptables: %v", err) return } diff --git a/pkg/yurttunnel/util/util.go b/pkg/yurttunnel/util/util.go index b812c15bc66..c280515b2f5 100644 --- a/pkg/yurttunnel/util/util.go +++ b/pkg/yurttunnel/util/util.go @@ -93,7 +93,7 @@ func GetConfiguredProxyPortsAndMappings(client clientset.Interface, insecureList if apierrors.IsNotFound(err) { return []string{}, map[string]string{}, nil } - return []string{}, map[string]string{}, fmt.Errorf("failed to get configmap %s/%s: %w", + return []string{}, map[string]string{}, fmt.Errorf("could not get configmap %s/%s: %w", YurttunnelServerDnatConfigMapNs, YurttunnelServerDnatConfigMapName, err) } @@ -160,7 +160,7 @@ func resolvePorts(portsStr, insecurePort string) []string { if len(proxyPort) != 0 { portInt, err := strconv.Atoi(proxyPort) if err != nil { - klog.Errorf("failed to parse port %s, %v", port, err) + klog.Errorf("could not parse port %s, %v", port, err) continue } else if portInt < MinPort || portInt > MaxPort { klog.Errorf("port %s is not invalid port(should be range 1~65535)", port)