diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
index 7288be9a0d69..c939c674988c 100644
--- a/.github/workflows/golangci-lint.yml
+++ b/.github/workflows/golangci-lint.yml
@@ -17,5 +17,5 @@ jobs:
       - name: golangci-lint
         uses: golangci/golangci-lint-action@v2
         with:
-          version: v1.43.0
+          version: v1.44.0
           working-directory: ${{matrix.working-directory}}
diff --git a/.golangci.yml b/.golangci.yml
index 597722eaa67e..16a9925f8262
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -3,6 +3,7 @@ linters:
   enable:
   - asciicheck
   - bodyclose
+  - containedctx
   - deadcode
   - depguard
   - dogsled
@@ -236,12 +237,20 @@ issues:
     - typecheck
    text: import (".+") is a program, not an importable package
    path: ^tools\.go$
-  # Ignore ifshort false positive
-  # TODO(sbueringer) false positive: https://github.com/esimonov/ifshort/issues/23
+  # TODO(sbueringer) Ignore ifshort false positive: https://github.com/esimonov/ifshort/issues/23
  - linters:
    - ifshort
    text: "variable 'isDeleteNodeAllowed' is only used in the if-statement.*"
    path: ^internal/controllers/machine/machine_controller\.go$
+  - linters:
+    - ifshort
+    text: "variable 'kcpMachinesWithErrors' is only used in the if-statement.*"
+    path: ^controlplane/kubeadm/internal/workload_cluster_conditions\.go$
+  # We don't care about defer in for loops in test files.
+  - linters:
+    - gocritic
+    text: "deferInLoop: Possible resource leak, 'defer' is called in the 'for' loop"
+    path: _test\.go
 
 run:
   timeout: 10m
diff --git a/cmd/clusterctl/client/alpha/rollout_pauser.go b/cmd/clusterctl/client/alpha/rollout_pauser.go
index 80e0ebea6766..862cecc45025 100644
--- a/cmd/clusterctl/client/alpha/rollout_pauser.go
+++ b/cmd/clusterctl/client/alpha/rollout_pauser.go
@@ -36,7 +36,7 @@ func (r *rollout) ObjectPauser(proxy cluster.Proxy, ref corev1.ObjectReference)
 			return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name)
 		}
 		if deployment.Spec.Paused {
-			return errors.Errorf("MachineDeploymet is already paused: %v/%v\n", ref.Kind, ref.Name)
+			return errors.Errorf("MachineDeployment is already paused: %v/%v\n", ref.Kind, ref.Name) //nolint:revive // MachineDeployment is intentionally capitalized.
 		}
 		if err := pauseMachineDeployment(proxy, ref.Name, ref.Namespace); err != nil {
 			return err
diff --git a/cmd/clusterctl/client/alpha/rollout_restarter.go b/cmd/clusterctl/client/alpha/rollout_restarter.go
index 67e9bf4d5da0..c422c65d242b 100644
--- a/cmd/clusterctl/client/alpha/rollout_restarter.go
+++ b/cmd/clusterctl/client/alpha/rollout_restarter.go
@@ -37,7 +37,7 @@ func (r *rollout) ObjectRestarter(proxy cluster.Proxy, ref corev1.ObjectReferenc
 			return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name)
 		}
 		if deployment.Spec.Paused {
-			return errors.Errorf("can't restart paused machinedeployment (run rollout resume first): %v/%v\n", ref.Kind, ref.Name)
+			return errors.Errorf("can't restart paused MachineDeployment (run rollout resume first): %v/%v", ref.Kind, ref.Name)
 		}
 		if err := setRestartedAtAnnotation(proxy, ref.Name, ref.Namespace); err != nil {
 			return err
diff --git a/cmd/clusterctl/client/alpha/rollout_resumer.go b/cmd/clusterctl/client/alpha/rollout_resumer.go
index 5e694516b264..1cdc5a0ee6b4 100644
--- a/cmd/clusterctl/client/alpha/rollout_resumer.go
+++ b/cmd/clusterctl/client/alpha/rollout_resumer.go
@@ -36,13 +36,13 @@ func (r *rollout) ObjectResumer(proxy cluster.Proxy, ref corev1.ObjectReference)
 			return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name)
 		}
 		if !deployment.Spec.Paused {
-			return errors.Errorf("MachineDeployment is not currently paused: %v/%v\n", ref.Kind, ref.Name)
+			return errors.Errorf("MachineDeployment is not currently paused: %v/%v\n", ref.Kind, ref.Name) //nolint:revive // MachineDeployment is intentionally capitalized.
 		}
 		if err := resumeMachineDeployment(proxy, ref.Name, ref.Namespace); err != nil {
 			return err
 		}
 	default:
-		return errors.Errorf("Invalid resource type %q, valid values are %v", ref.Kind, validResourceTypes)
+		return errors.Errorf("invalid resource type %q, valid values are %v", ref.Kind, validResourceTypes)
 	}
 	return nil
 }
diff --git a/cmd/clusterctl/client/client_test.go b/cmd/clusterctl/client/client_test.go
index 85342294263e..fad51bcd1b1f 100644
--- a/cmd/clusterctl/client/client_test.go
+++ b/cmd/clusterctl/client/client_test.go
@@ -169,7 +169,7 @@ func newFakeClient(configClient config.Client) *fakeClient {
 			// converting the client.Kubeconfig to cluster.Kubeconfig alias
 			k := cluster.Kubeconfig(i.Kubeconfig)
 			if _, ok := fake.clusters[k]; !ok {
-				return nil, errors.Errorf("Cluster for kubeconfig %q and/or context %q does not exist.", i.Kubeconfig.Path, i.Kubeconfig.Context)
+				return nil, errors.Errorf("Cluster for kubeconfig %q and/or context %q does not exist", i.Kubeconfig.Path, i.Kubeconfig.Context)
 			}
 			return fake.clusters[k], nil
 		}
@@ -179,7 +179,7 @@ func newFakeClient(configClient config.Client) *fakeClient {
 		InjectClusterClientFactory(clusterClientFactory),
 		InjectRepositoryFactory(func(input RepositoryClientFactoryInput) (repository.Client, error) {
 			if _, ok := fake.repositories[input.Provider.ManifestLabel()]; !ok {
-				return nil, errors.Errorf("Repository for kubeconfig %q does not exist.", input.Provider.ManifestLabel())
+				return nil, errors.Errorf("repository for kubeconfig %q does not exist", input.Provider.ManifestLabel())
 			}
 			return fake.repositories[input.Provider.ManifestLabel()], nil
 		}),
@@ -222,7 +222,7 @@ func newFakeCluster(kubeconfig cluster.Kubeconfig, configClient config.Client) *
 		cluster.InjectPollImmediateWaiter(pollImmediateWaiter),
 		cluster.InjectRepositoryFactory(func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) {
 			if _, ok := fake.repositories[provider.Name()]; !ok {
-				return nil, errors.Errorf("Repository for kubeconfig %q does not exists.", provider.Name())
+				return nil, errors.Errorf("repository for kubeconfig %q does not exist", provider.Name())
 			}
 			return fake.repositories[provider.Name()], nil
 		}),
diff --git a/cmd/clusterctl/client/cluster/mover_test.go b/cmd/clusterctl/client/cluster/mover_test.go
index 663c0d1b76e9..833f65e43e48 100644
--- a/cmd/clusterctl/client/cluster/mover_test.go
+++ b/cmd/clusterctl/client/cluster/mover_test.go
@@ -929,7 +929,7 @@ func Test_objectMover_filesToObjs(t *testing.T) {
 
 			for _, fileName := range tt.files {
 				path := filepath.Join(dir, fileName)
-				file, err := os.Create(path)
+				file, err := os.Create(path) //nolint:gosec // No security issue: unit test.
 				if err != nil {
 					return
 				}
diff --git a/cmd/clusterctl/client/common.go b/cmd/clusterctl/client/common.go
index db47705993c5..be299a7307c7 100644
--- a/cmd/clusterctl/client/common.go
+++ b/cmd/clusterctl/client/common.go
@@ -87,16 +87,14 @@ func parseProviderName(provider string) (name string, version string, err error)
 }
 
 func validateDNS1123Label(label string) error {
-	errs := validation.IsDNS1123Label(label)
-	if len(errs) != 0 {
+	if errs := validation.IsDNS1123Label(label); len(errs) != 0 {
 		return errors.New(strings.Join(errs, "; "))
 	}
 	return nil
 }
 
 func validateDNS1123Domanin(subdomain string) error {
-	errs := validation.IsDNS1123Subdomain(subdomain)
-	if len(errs) != 0 {
+	if errs := validation.IsDNS1123Subdomain(subdomain); len(errs) != 0 {
 		return errors.New(strings.Join(errs, "; "))
 	}
 	return nil
diff --git a/cmd/clusterctl/client/config/providers_client.go b/cmd/clusterctl/client/config/providers_client.go
index 2f616bd9084c..4bb051bf6555
--- a/cmd/clusterctl/client/config/providers_client.go
+++ b/cmd/clusterctl/client/config/providers_client.go
@@ -298,8 +298,7 @@ func validateProvider(r Provider) error {
 		return errors.Errorf("name %s must be used with the %s type (name: %s, type: %s)",
 			ClusterAPIProviderName, clusterctlv1.CoreProviderType, r.Name(), r.Type())
 	}
-	errMsgs := validation.IsDNS1123Subdomain(r.Name())
-	if len(errMsgs) != 0 {
+	if errMsgs := validation.IsDNS1123Subdomain(r.Name()); len(errMsgs) != 0 {
 		return errors.Errorf("invalid provider name: %s", strings.Join(errMsgs, "; "))
 	}
 	if r.URL() == "" {
diff --git a/cmd/clusterctl/client/config/reader_viper.go b/cmd/clusterctl/client/config/reader_viper.go
index 939f8720afaf..113423100efe 100644
--- a/cmd/clusterctl/client/config/reader_viper.go
+++ b/cmd/clusterctl/client/config/reader_viper.go
@@ -137,7 +137,7 @@ func downloadFile(url string, filepath string) error {
 	ctx := context.TODO()
 
 	// Create the file
-	out, err := os.Create(filepath)
+	out, err := os.Create(filepath) //nolint:gosec // No security issue: filepath is safe.
 	if err != nil {
 		return errors.Wrapf(err, "failed to create the clusterctl config file %s", filepath)
 	}
diff --git a/cmd/clusterctl/client/config_test.go b/cmd/clusterctl/client/config_test.go
index ab27fd35c5f4..2f07f3bcbd96 100644
--- a/cmd/clusterctl/client/config_test.go
+++ b/cmd/clusterctl/client/config_test.go
@@ -823,7 +823,7 @@ func newFakeClientWithoutCluster(configClient config.Client) *fakeClient {
 		InjectConfig(fake.configClient),
 		InjectRepositoryFactory(func(input RepositoryClientFactoryInput) (repository.Client, error) {
 			if _, ok := fake.repositories[input.Provider.ManifestLabel()]; !ok {
-				return nil, errors.Errorf("Repository for kubeconfig %q does not exist.", input.Provider.ManifestLabel())
+				return nil, errors.Errorf("repository for kubeconfig %q does not exist", input.Provider.ManifestLabel())
 			}
 			return fake.repositories[input.Provider.ManifestLabel()], nil
 		}),
diff --git a/cmd/clusterctl/client/delete.go b/cmd/clusterctl/client/delete.go
index 7c10cfced556..ea5785afaa24 100644
--- a/cmd/clusterctl/client/delete.go
+++ b/cmd/clusterctl/client/delete.go
@@ -116,7 +116,7 @@ func (c *clusterctlClient) Delete(options DeleteOptions) error {
 			return err
 		}
 		if provider.Namespace == "" {
-			return errors.Errorf("Failed to identify the namespace for the %q provider.", provider.ProviderName)
+			return errors.Errorf("failed to identify the namespace for the %q provider", provider.ProviderName)
 		}
 
 		if provider.Version != "" {
@@ -125,7 +125,7 @@ func (c *clusterctlClient) Delete(options DeleteOptions) error {
 				return err
 			}
 			if provider.Version != version {
-				return errors.Errorf("Failed to identity the provider %q with version %q.", provider.ProviderName, provider.Version)
+				return errors.Errorf("failed to identify the provider %q with version %q", provider.ProviderName, provider.Version)
 			}
 		}
 
diff --git a/cmd/clusterctl/cmd/config_repositories.go b/cmd/clusterctl/cmd/config_repositories.go
index dc9957c794f9..90d544202106 100644
--- a/cmd/clusterctl/cmd/config_repositories.go
+++ b/cmd/clusterctl/cmd/config_repositories.go
@@ -78,7 +78,7 @@ func init() {
 
 func runGetRepositories(cfgFile string, out io.Writer) error {
 	if cro.output != RepositoriesOutputText && cro.output != RepositoriesOutputYaml {
-		return errors.Errorf("Invalid output format %q. Valid values: %v.", cro.output, RepositoriesOutputs)
+		return errors.Errorf("invalid output format %q, valid values: %v", cro.output, RepositoriesOutputs)
 	}
 
 	if out == nil {
diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions.go b/controlplane/kubeadm/internal/workload_cluster_conditions.go
index be9e1485168a..6fd2b5ff54b0 100644
--- a/controlplane/kubeadm/internal/workload_cluster_conditions.go
+++ b/controlplane/kubeadm/internal/workload_cluster_conditions.go
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"strings"
 
+	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -102,26 +103,8 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane
 			continue
 		}
 
-		// Create the etcd Client for the etcd Pod scheduled on the Node
-		etcdClient, err := w.etcdClientGenerator.forFirstAvailableNode(ctx, []string{node.Name})
+		currentMembers, err := w.getCurrentEtcdMembers(ctx, machine, node.Name)
 		if err != nil {
-			conditions.MarkUnknown(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to connect to the etcd pod on the %s node: %s", node.Name, err)
-			continue
-		}
-		defer etcdClient.Close()
-
-		// While creating a new client, forFirstAvailableNode retrieves the status for the endpoint; check if the endpoint has errors.
-		if len(etcdClient.Errors) > 0 {
-			conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd member status reports errors: %s", strings.Join(etcdClient.Errors, ", "))
-			continue
-		}
-
-		// Gets the list etcd members known by this member.
-		currentMembers, err := etcdClient.Members(ctx)
-		if err != nil {
-			// NB. We should never be in here, given that we just received answer to the etcd calls included in forFirstAvailableNode;
-			// however, we are considering the calls to Members a signal of etcd not being stable.
-			conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Failed get answer from the etcd member on the %s node", node.Name)
 			continue
 		}
 
@@ -182,6 +165,33 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane
 	})
 }
 
+func (w *Workload) getCurrentEtcdMembers(ctx context.Context, machine *clusterv1.Machine, nodeName string) ([]*etcd.Member, error) {
+	// Create the etcd Client for the etcd Pod scheduled on the Node
+	etcdClient, err := w.etcdClientGenerator.forFirstAvailableNode(ctx, []string{nodeName})
+	if err != nil {
+		conditions.MarkUnknown(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to connect to the etcd pod on the %s node: %s", nodeName, err)
+		return nil, errors.Wrapf(err, "failed to get current etcd members: failed to connect to the etcd pod on the %s node", nodeName)
+	}
+	defer etcdClient.Close()
+
+	// While creating a new client, forFirstAvailableNode retrieves the status for the endpoint; check if the endpoint has errors.
+	if len(etcdClient.Errors) > 0 {
+		conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd member status reports errors: %s", strings.Join(etcdClient.Errors, ", "))
+		return nil, errors.Errorf("failed to get current etcd members: etcd member status reports errors: %s", strings.Join(etcdClient.Errors, ", "))
+	}
+
+	// Gets the list of etcd members known by this member.
+	currentMembers, err := etcdClient.Members(ctx)
+	if err != nil {
+		// NB. We should never be in here, given that we just received an answer to the etcd calls included in forFirstAvailableNode;
+		// however, we are considering the calls to Members a signal of etcd not being stable.
+		conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Failed to get answer from the etcd member on the %s node", nodeName)
+		return nil, errors.Errorf("failed to get current etcd members: failed to get an answer from the etcd member on the %s node", nodeName)
+	}
+
+	return currentMembers, nil
+}
+
 func compareMachinesAndMembers(controlPlane *ControlPlane, members []*etcd.Member, kcpErrors []string) []string {
 	// NOTE: We run this check only if we actually know the list of members, otherwise the first for loop
 	// could generate a false negative when reporting missing etcd members.
diff --git a/controlplane/kubeadm/internal/workload_cluster_etcd.go b/controlplane/kubeadm/internal/workload_cluster_etcd.go
index 14cea12d1424..3b3662a29f01 100644
--- a/controlplane/kubeadm/internal/workload_cluster_etcd.go
+++ b/controlplane/kubeadm/internal/workload_cluster_etcd.go
@@ -37,50 +37,59 @@ type etcdClientFor interface {
 
 // ReconcileEtcdMembers iterates over all etcd members and finds members that do not have corresponding nodes.
 // If there are any such members, it deletes them from etcd and removes their nodes from the kubeadm configmap so that kubeadm does not run etcd health checks on them.
 func (w *Workload) ReconcileEtcdMembers(ctx context.Context, nodeNames []string, version semver.Version) ([]string, error) {
-	removedMembers := []string{}
-	errs := []error{}
+	allRemovedMembers := []string{}
+	allErrs := []error{}
 	for _, nodeName := range nodeNames {
-		// Create the etcd Client for the etcd Pod scheduled on the Node
-		etcdClient, err := w.etcdClientGenerator.forFirstAvailableNode(ctx, []string{nodeName})
-		if err != nil {
-			continue
-		}
-		defer etcdClient.Close()
+		removedMembers, errs := w.reconcileEtcdMember(ctx, nodeNames, nodeName, version)
+		allRemovedMembers = append(allRemovedMembers, removedMembers...)
+		allErrs = append(allErrs, errs...)
+	}
+
+	return allRemovedMembers, kerrors.NewAggregate(allErrs)
+}
-		members, err := etcdClient.Members(ctx)
-		if err != nil {
+func (w *Workload) reconcileEtcdMember(ctx context.Context, nodeNames []string, nodeName string, version semver.Version) ([]string, []error) {
+	// Create the etcd Client for the etcd Pod scheduled on the Node
+	etcdClient, err := w.etcdClientGenerator.forFirstAvailableNode(ctx, []string{nodeName})
+	if err != nil {
+		return nil, nil
+	}
+	defer etcdClient.Close()
+
+	members, err := etcdClient.Members(ctx)
+	if err != nil {
+		return nil, nil
+	}
+
+	// Check if any member's node is missing from workload cluster
+	// If any, delete it with best effort
+	removedMembers := []string{}
+	errs := []error{}
+loopmembers:
+	for _, member := range members {
+		// If this member is just added, it has an empty name until the etcd pod starts. Ignore it.
+		if member.Name == "" {
 			continue
 		}
 
-		// Check if any member's node is missing from workload cluster
-		// If any, delete it with best effort
-	loopmembers:
-		for _, member := range members {
-			// If this member is just added, it has a empty name until the etcd pod starts. Ignore it.
-			if member.Name == "" {
-				continue
-			}
-
-			for _, nodeName := range nodeNames {
-				if member.Name == nodeName {
-					// We found the matching node, continue with the outer loop.
-					continue loopmembers
-				}
+		for _, nodeName := range nodeNames {
+			if member.Name == nodeName {
+				// We found the matching node, continue with the outer loop.
+				continue loopmembers
			}
+		}
 
-			// If we're here, the node cannot be found.
-			removedMembers = append(removedMembers, member.Name)
-			if err := w.removeMemberForNode(ctx, member.Name); err != nil {
-				errs = append(errs, err)
-			}
+		// If we're here, the node cannot be found.
+		removedMembers = append(removedMembers, member.Name)
+		if err := w.removeMemberForNode(ctx, member.Name); err != nil {
+			errs = append(errs, err)
+		}
 
-			if err := w.RemoveNodeFromKubeadmConfigMap(ctx, member.Name, version); err != nil {
-				errs = append(errs, err)
-			}
+		if err := w.RemoveNodeFromKubeadmConfigMap(ctx, member.Name, version); err != nil {
+			errs = append(errs, err)
 		}
 	}
-
-	return removedMembers, kerrors.NewAggregate(errs)
+	return removedMembers, errs
 }
 
 // UpdateEtcdVersionInKubeadmConfigMap sets the imageRepository or the imageTag or both in the kubeadm config map.
diff --git a/internal/webhooks/cluster.go b/internal/webhooks/cluster.go
index 0aa82a805037..30232f825048 100644
--- a/internal/webhooks/cluster.go
+++ b/internal/webhooks/cluster.go
@@ -81,7 +81,7 @@ func (webhook *Cluster) Default(ctx context.Context, obj runtime.Object) error {
 	clusterClass, err := webhook.getClusterClassForCluster(ctx, cluster)
 	if err != nil {
 		// Return early with errors if the ClusterClass can't be retrieved.
-		return apierrors.NewInternalError(errors.Wrapf(err, "Cluster %s can't be validated. ClusterClass %s can not be retrieved.", cluster.Name, cluster.Spec.Topology.Class))
+		return apierrors.NewInternalError(errors.Wrapf(err, "Cluster %s can't be validated. ClusterClass %s can not be retrieved", cluster.Name, cluster.Spec.Topology.Class))
 	}
 
 	// We gather all defaulting errors and return them together.
diff --git a/internal/webhooks/cluster_test.go b/internal/webhooks/cluster_test.go
index 9d86d5033615..f849ca2f0c52 100644
--- a/internal/webhooks/cluster_test.go
+++ b/internal/webhooks/cluster_test.go
@@ -46,7 +46,10 @@ func TestClusterDefaultNamespaces(t *testing.T) {
 		},
 	}
 	webhook := &Cluster{}
-	t.Run("for Cluster", customDefaultValidateTest(ctx, c, webhook))
+	// TODO(sbueringer) We are storing the func in testFunc temporarily to work around
+	// an issue in thelper: https://github.com/kulti/thelper/issues/31
+	testFunc := customDefaultValidateTest(ctx, c, webhook)
+	t.Run("for Cluster", testFunc)
 
 	g.Expect(webhook.Default(ctx, c)).To(Succeed())
 	g.Expect(c.Spec.InfrastructureRef.Namespace).To(Equal(c.Namespace))
@@ -348,8 +351,10 @@ func TestClusterDefaultTopologyVersion(t *testing.T) {
 
 	// Create the webhook and add the fakeClient as its client.
 	webhook := &Cluster{Client: fakeClient}
-
-	t.Run("for Cluster", customDefaultValidateTest(ctx, c, webhook))
+	// TODO(sbueringer) We are storing the func in testFunc temporarily to work around
+	// an issue in thelper: https://github.com/kulti/thelper/issues/31
+	testFunc := customDefaultValidateTest(ctx, c, webhook)
+	t.Run("for Cluster", testFunc)
 
 	g.Expect(webhook.Default(ctx, c)).To(Succeed())
 	g.Expect(c.Spec.Topology.Version).To(HavePrefix("v"))
diff --git a/internal/webhooks/clusterclass_test.go b/internal/webhooks/clusterclass_test.go
index 5ab6fc4410d5..b9bb641af108 100644
--- a/internal/webhooks/clusterclass_test.go
+++ b/internal/webhooks/clusterclass_test.go
@@ -76,7 +76,10 @@ func TestClusterClassDefaultNamespaces(t *testing.T) {
 
 	// Create the webhook and add the fakeClient as its client.
 	webhook := &ClusterClass{Client: fakeClient}
-	t.Run("for ClusterClass", customDefaultValidateTest(ctx, in, webhook))
+	// TODO(sbueringer) We are storing the func in testFunc temporarily to work around
+	// an issue in thelper: https://github.com/kulti/thelper/issues/31
+	testFunc := customDefaultValidateTest(ctx, in, webhook)
+	t.Run("for ClusterClass", testFunc)
 
 	g := NewWithT(t)
 	g.Expect(webhook.Default(ctx, in)).To(Succeed())
diff --git a/test/framework/clusterctl/logger/log_file.go b/test/framework/clusterctl/logger/log_file.go
index c0f999c8ce35..c37ee5024cd6 100644
--- a/test/framework/clusterctl/logger/log_file.go
+++ b/test/framework/clusterctl/logger/log_file.go
@@ -36,7 +36,7 @@ func CreateLogFile(input CreateLogFileInput) *LogFile {
 	filePath := filepath.Join(input.LogFolder, input.Name)
 	Expect(os.MkdirAll(filepath.Dir(filePath), 0750)).To(Succeed(), "Failed to create log folder %s", filepath.Dir(filePath))
 
-	f, err := os.Create(filePath)
+	f, err := os.Create(filePath) //nolint:gosec // No security issue: filepath is safe.
 	Expect(err).ToNot(HaveOccurred(), "Failed to create log file %s", filePath)
 
 	return &LogFile{
diff --git a/test/framework/docker_logcollector.go b/test/framework/docker_logcollector.go
index 812c6b922c86..4d69338b031d 100644
--- a/test/framework/docker_logcollector.go
+++ b/test/framework/docker_logcollector.go
@@ -163,5 +163,5 @@ func fileOnHost(path string) (*os.File, error) {
 	if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
 		return nil, err
 	}
-	return os.Create(path)
+	return os.Create(path) //nolint:gosec // No security issue: path is safe.
 }
diff --git a/test/framework/ginkgoextensions/output.go b/test/framework/ginkgoextensions/output.go
index 3856a7e7b795..1dfc19046c89 100644
--- a/test/framework/ginkgoextensions/output.go
+++ b/test/framework/ginkgoextensions/output.go
@@ -58,7 +58,7 @@ func EnableFileLogging(path string) (io.WriteCloser, error) {
 }
 
 func newFileWriter(path string) (io.WriteCloser, error) {
-	f, err := os.Create(path)
+	f, err := os.Create(path) //nolint:gosec // No security issue: path is safe.
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to create file")
 	}
diff --git a/test/framework/kubetest/setup.go b/test/framework/kubetest/setup.go
index 637c9f9f4af6..4c3cf7574086 100644
--- a/test/framework/kubetest/setup.go
+++ b/test/framework/kubetest/setup.go
@@ -31,7 +31,7 @@ func copyFile(srcFilePath, destFilePath string) error {
 	if err != nil {
 		return err
 	}
-	destFile, err := os.Create(destFilePath)
+	destFile, err := os.Create(destFilePath) //nolint:gosec // No security issue: destFilePath is safe.
 	if err != nil {
 		return err
 	}
diff --git a/test/infrastructure/container/docker.go b/test/infrastructure/container/docker.go
index 610163f0fe87..1d27836e9d0d 100644
--- a/test/infrastructure/container/docker.go
+++ b/test/infrastructure/container/docker.go
@@ -78,7 +78,7 @@ func (d *dockerRuntime) SaveContainerImage(ctx context.Context, image, dest stri
 	}
 	defer reader.Close()
 
-	tar, err := os.Create(dest)
+	tar, err := os.Create(dest) //nolint:gosec // No security issue: dest is safe.
 	if err != nil {
 		return fmt.Errorf("failed to create destination file %q: %v", dest, err)
 	}
diff --git a/test/infrastructure/docker/api/v1beta1/dockercluster_webhook.go b/test/infrastructure/docker/api/v1beta1/dockercluster_webhook.go
index 5e480a68e92f..95e801e10eb7 100644
--- a/test/infrastructure/docker/api/v1beta1/dockercluster_webhook.go
+++ b/test/infrastructure/docker/api/v1beta1/dockercluster_webhook.go
@@ -45,8 +45,7 @@ var _ webhook.Validator = &DockerCluster{}
 
 // ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
 func (c *DockerCluster) ValidateCreate() error {
-	allErrs := validateDockerClusterSpec(c.Spec)
-	if len(allErrs) > 0 {
+	if allErrs := validateDockerClusterSpec(c.Spec); len(allErrs) > 0 {
 		return apierrors.NewInvalid(GroupVersion.WithKind("DockerCluster").GroupKind(), c.Name, allErrs)
 	}
 	return nil
diff --git a/test/infrastructure/docker/internal/docker/machine.go b/test/infrastructure/docker/internal/docker/machine.go
index 490bbe732f37..0417ddf10fc7 100644
--- a/test/infrastructure/docker/internal/docker/machine.go
+++ b/test/infrastructure/docker/internal/docker/machine.go
@@ -308,7 +308,7 @@ func (m *Machine) PreloadLoadImages(ctx context.Context, images []string) error
 		if err != nil {
 			return errors.Wrap(err, "failed to open image")
 		}
-		defer f.Close()
+		defer f.Close() //nolint:gocritic // No resource leak.
 
 		ps := m.container.Commander.Command("ctr", "--namespace=k8s.io", "images", "import", "-")
 		ps.SetStdin(f)