
Commit

Merge pull request #6014 from sbueringer/pr-upgrade-golangci-lint-to-1.44

🌱 Upgrade to golangci-lint v1.44 and fix findings
k8s-ci-robot authored Jan 31, 2022
2 parents 05e5c5b + dece5b1 commit 7f3c5f0
Showing 25 changed files with 120 additions and 88 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/golangci-lint.yml
@@ -17,5 +17,5 @@ jobs:
       - name: golangci-lint
         uses: golangci/golangci-lint-action@v2
         with:
-          version: v1.43.0
+          version: v1.44.0
           working-directory: ${{matrix.working-directory}}
13 changes: 11 additions & 2 deletions .golangci.yml
@@ -3,6 +3,7 @@ linters:
   enable:
   - asciicheck
   - bodyclose
+  - containedctx
   - deadcode
   - depguard
   - dogsled
@@ -236,12 +237,20 @@ issues:
     - typecheck
     text: import (".+") is a program, not an importable package
     path: ^tools\.go$
-  # Ignore ifshort false positive
-  # TODO(sbueringer) false positive: https://github.com/esimonov/ifshort/issues/23
+  # TODO(sbueringer) Ignore ifshort false positive: https://github.com/esimonov/ifshort/issues/23
   - linters:
     - ifshort
     text: "variable 'isDeleteNodeAllowed' is only used in the if-statement.*"
    path: ^internal/controllers/machine/machine_controller\.go$
+  - linters:
+    - ifshort
+    text: "variable 'kcpMachinesWithErrors' is only used in the if-statement.*"
+    path: ^controlplane/kubeadm/internal/workload_cluster_conditions\.go$
+  # We don't care about defer in for loops in test files.
+  - linters:
+    - gocritic
+    text: "deferInLoop: Possible resource leak, 'defer' is called in the 'for' loop"
+    path: _test\.go

 run:
   timeout: 10m
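The newly enabled containedctx linter detects structs that carry a context.Context field, since a stored context hides cancellation scope from the callers; the usual fix is to pass the context per call. A minimal sketch of the flagged pattern and its fix (hypothetical names, not code from this PR):

package example

import "context"

// worker stores a context in a field; containedctx reports this,
// because the context's lifetime is invisible at the call sites.
type worker struct {
	ctx context.Context
}

// workerFixed takes the context per method call instead, which is
// the idiom containedctx pushes toward.
type workerFixed struct{}

func (w *workerFixed) run(ctx context.Context) error {
	<-ctx.Done() // the caller controls cancellation explicitly
	return ctx.Err()
}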
2 changes: 1 addition & 1 deletion cmd/clusterctl/client/alpha/rollout_pauser.go
@@ -36,7 +36,7 @@ func (r *rollout) ObjectPauser(proxy cluster.Proxy, ref corev1.ObjectReference)
 			return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name)
 		}
 		if deployment.Spec.Paused {
-			return errors.Errorf("MachineDeploymet is already paused: %v/%v\n", ref.Kind, ref.Name)
+			return errors.Errorf("MachineDeploymet is already paused: %v/%v\n", ref.Kind, ref.Name) //nolint:revive // MachineDeployment is intentionally capitalized.
 		}
 		if err := pauseMachineDeployment(proxy, ref.Name, ref.Namespace); err != nil {
 			return err
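The suppression above follows golangci-lint's //nolint syntax, used throughout this commit: the linter name comes after the colon, and a second // carries the justification (which the nolintlint checker can be configured to require). A generic sketch, not code from this PR:

package example

import "github.com/pkg/errors"

func pausedError(kind, name string) error {
	// Scope the suppression to a single linter on a single line and say why;
	// a bare //nolint with no linter name or reason is best avoided.
	return errors.Errorf("MachineDeployment is already paused: %v/%v", kind, name) //nolint:revive // API kind is intentionally capitalized.
}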
2 changes: 1 addition & 1 deletion cmd/clusterctl/client/alpha/rollout_restarter.go
@@ -37,7 +37,7 @@ func (r *rollout) ObjectRestarter(proxy cluster.Proxy, ref corev1.ObjectReferenc
 			return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name)
 		}
 		if deployment.Spec.Paused {
-			return errors.Errorf("can't restart paused machinedeployment (run rollout resume first): %v/%v\n", ref.Kind, ref.Name)
+			return errors.Errorf("can't restart paused MachineDeployment (run rollout resume first): %v/%v", ref.Kind, ref.Name)
 		}
 		if err := setRestartedAtAnnotation(proxy, ref.Name, ref.Namespace); err != nil {
 			return err
4 changes: 2 additions & 2 deletions cmd/clusterctl/client/alpha/rollout_resumer.go
@@ -36,13 +36,13 @@ func (r *rollout) ObjectResumer(proxy cluster.Proxy, ref corev1.ObjectReference)
 			return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name)
 		}
 		if !deployment.Spec.Paused {
-			return errors.Errorf("MachineDeployment is not currently paused: %v/%v\n", ref.Kind, ref.Name)
+			return errors.Errorf("MachineDeployment is not currently paused: %v/%v\n", ref.Kind, ref.Name) //nolint:revive // MachineDeployment is intentionally capitalized.
 		}
 		if err := resumeMachineDeployment(proxy, ref.Name, ref.Namespace); err != nil {
 			return err
 		}
 	default:
-		return errors.Errorf("Invalid resource type %q, valid values are %v", ref.Kind, validResourceTypes)
+		return errors.Errorf("invalid resource type %q, valid values are %v", ref.Kind, validResourceTypes)
 	}
 	return nil
 }
6 changes: 3 additions & 3 deletions cmd/clusterctl/client/client_test.go
@@ -169,7 +169,7 @@ func newFakeClient(configClient config.Client) *fakeClient {
 		// converting the client.Kubeconfig to cluster.Kubeconfig alias
 		k := cluster.Kubeconfig(i.Kubeconfig)
 		if _, ok := fake.clusters[k]; !ok {
-			return nil, errors.Errorf("Cluster for kubeconfig %q and/or context %q does not exist.", i.Kubeconfig.Path, i.Kubeconfig.Context)
+			return nil, errors.Errorf("Cluster for kubeconfig %q and/or context %q does not exist", i.Kubeconfig.Path, i.Kubeconfig.Context)
 		}
 		return fake.clusters[k], nil
 	}
@@ -179,7 +179,7 @@ func newFakeClient(configClient config.Client) *fakeClient {
 		InjectClusterClientFactory(clusterClientFactory),
 		InjectRepositoryFactory(func(input RepositoryClientFactoryInput) (repository.Client, error) {
 			if _, ok := fake.repositories[input.Provider.ManifestLabel()]; !ok {
-				return nil, errors.Errorf("Repository for kubeconfig %q does not exist.", input.Provider.ManifestLabel())
+				return nil, errors.Errorf("repository for kubeconfig %q does not exist", input.Provider.ManifestLabel())
 			}
 			return fake.repositories[input.Provider.ManifestLabel()], nil
 		}),
@@ -222,7 +222,7 @@ func newFakeCluster(kubeconfig cluster.Kubeconfig, configClient config.Client) *
 		cluster.InjectPollImmediateWaiter(pollImmediateWaiter),
 		cluster.InjectRepositoryFactory(func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) {
 			if _, ok := fake.repositories[provider.Name()]; !ok {
-				return nil, errors.Errorf("Repository for kubeconfig %q does not exists.", provider.Name())
+				return nil, errors.Errorf("repository for kubeconfig %q does not exist", provider.Name())
 			}
 			return fake.repositories[provider.Name()], nil
 		}),
2 changes: 1 addition & 1 deletion cmd/clusterctl/client/cluster/mover_test.go
@@ -929,7 +929,7 @@ func Test_objectMover_filesToObjs(t *testing.T) {
 
 			for _, fileName := range tt.files {
 				path := filepath.Join(dir, fileName)
-				file, err := os.Create(path)
+				file, err := os.Create(path) //nolint:gosec // No security issue: unit test.
 				if err != nil {
 					return
 				}
6 changes: 2 additions & 4 deletions cmd/clusterctl/client/common.go
@@ -87,16 +87,14 @@ func parseProviderName(provider string) (name string, version string, err error)
 }
 
 func validateDNS1123Label(label string) error {
-	errs := validation.IsDNS1123Label(label)
-	if len(errs) != 0 {
+	if errs := validation.IsDNS1123Label(label); len(errs) != 0 {
 		return errors.New(strings.Join(errs, "; "))
 	}
 	return nil
 }
 
 func validateDNS1123Domanin(subdomain string) error {
-	errs := validation.IsDNS1123Subdomain(subdomain)
-	if len(errs) != 0 {
+	if errs := validation.IsDNS1123Subdomain(subdomain); len(errs) != 0 {
 		return errors.New(strings.Join(errs, "; "))
 	}
 	return nil
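The reshuffle above (repeated in providers_client.go below) uses Go's if-with-initializer form: declaring the variable inside the if statement scopes it to that branch, which is what the ifshort linter asks for when a value is only used in the immediately following if. A standalone sketch:

package example

import (
	"errors"
	"strings"

	"k8s.io/apimachinery/pkg/util/validation"
)

func validateLabel(label string) error {
	// errs exists only within the if statement, so it cannot be
	// accidentally reused or shadowed later in the function.
	if errs := validation.IsDNS1123Label(label); len(errs) != 0 {
		return errors.New(strings.Join(errs, "; "))
	}
	return nil
}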
3 changes: 1 addition & 2 deletions cmd/clusterctl/client/config/providers_client.go
@@ -298,8 +298,7 @@ func validateProvider(r Provider) error {
 		return errors.Errorf("name %s must be used with the %s type (name: %s, type: %s)", ClusterAPIProviderName, clusterctlv1.CoreProviderType, r.Name(), r.Type())
 	}
 
-	errMsgs := validation.IsDNS1123Subdomain(r.Name())
-	if len(errMsgs) != 0 {
+	if errMsgs := validation.IsDNS1123Subdomain(r.Name()); len(errMsgs) != 0 {
 		return errors.Errorf("invalid provider name: %s", strings.Join(errMsgs, "; "))
 	}
 	if r.URL() == "" {
2 changes: 1 addition & 1 deletion cmd/clusterctl/client/config/reader_viper.go
@@ -137,7 +137,7 @@ func downloadFile(url string, filepath string) error {
 	ctx := context.TODO()
 
 	// Create the file
-	out, err := os.Create(filepath)
+	out, err := os.Create(filepath) //nolint:gosec // No security issue: filepath is safe.
 	if err != nil {
 		return errors.Wrapf(err, "failed to create the clusterctl config file %s", filepath)
 	}
2 changes: 1 addition & 1 deletion cmd/clusterctl/client/config_test.go
@@ -823,7 +823,7 @@ func newFakeClientWithoutCluster(configClient config.Client) *fakeClient {
 		InjectConfig(fake.configClient),
 		InjectRepositoryFactory(func(input RepositoryClientFactoryInput) (repository.Client, error) {
 			if _, ok := fake.repositories[input.Provider.ManifestLabel()]; !ok {
-				return nil, errors.Errorf("Repository for kubeconfig %q does not exist.", input.Provider.ManifestLabel())
+				return nil, errors.Errorf("repository for kubeconfig %q does not exist", input.Provider.ManifestLabel())
 			}
 			return fake.repositories[input.Provider.ManifestLabel()], nil
 		}),
4 changes: 2 additions & 2 deletions cmd/clusterctl/client/delete.go
@@ -116,7 +116,7 @@ func (c *clusterctlClient) Delete(options DeleteOptions) error {
 			return err
 		}
 		if provider.Namespace == "" {
-			return errors.Errorf("Failed to identify the namespace for the %q provider.", provider.ProviderName)
+			return errors.Errorf("failed to identify the namespace for the %q provider", provider.ProviderName)
 		}
 
 		if provider.Version != "" {
@@ -125,7 +125,7 @@ func (c *clusterctlClient) Delete(options DeleteOptions) error {
 				return err
 			}
 			if provider.Version != version {
-				return errors.Errorf("Failed to identity the provider %q with version %q.", provider.ProviderName, provider.Version)
+				return errors.Errorf("failed to identify the provider %q with version %q", provider.ProviderName, provider.Version)
 			}
 		}
 
2 changes: 1 addition & 1 deletion cmd/clusterctl/cmd/config_repositories.go
@@ -78,7 +78,7 @@ func init() {
 
 func runGetRepositories(cfgFile string, out io.Writer) error {
 	if cro.output != RepositoriesOutputText && cro.output != RepositoriesOutputYaml {
-		return errors.Errorf("Invalid output format %q. Valid values: %v.", cro.output, RepositoriesOutputs)
+		return errors.Errorf("invalid output format %q, valid values: %v", cro.output, RepositoriesOutputs)
 	}
 
 	if out == nil {
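The error-message edits in this and the preceding clusterctl files all apply the standard Go convention that error strings start lowercase and carry no trailing punctuation, because they are routinely wrapped into longer messages; revive flags violations, which is also why the intentionally capitalized "MachineDeployment" messages above get //nolint:revive instead of a rewrite. A small illustration (hypothetical messages):

package example

import "fmt"

func findProvider(name string) error {
	// Lowercase, no trailing period: the string reads cleanly once wrapped.
	err := fmt.Errorf("invalid provider name %q", name)

	// Wrapping is the reason for the convention; a capitalized inner message
	// would land mid-sentence: "deleting provider: Invalid provider name ...".
	return fmt.Errorf("deleting provider: %w", err)
}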
48 changes: 29 additions & 19 deletions controlplane/kubeadm/internal/workload_cluster_conditions.go
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"strings"
 
+	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -102,26 +103,8 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane
 			continue
 		}
 
-		// Create the etcd Client for the etcd Pod scheduled on the Node
-		etcdClient, err := w.etcdClientGenerator.forFirstAvailableNode(ctx, []string{node.Name})
+		currentMembers, err := w.getCurrentEtcdMembers(ctx, machine, node.Name)
 		if err != nil {
-			conditions.MarkUnknown(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to connect to the etcd pod on the %s node: %s", node.Name, err)
 			continue
 		}
-		defer etcdClient.Close()
-
-		// While creating a new client, forFirstAvailableNode retrieves the status for the endpoint; check if the endpoint has errors.
-		if len(etcdClient.Errors) > 0 {
-			conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd member status reports errors: %s", strings.Join(etcdClient.Errors, ", "))
-			continue
-		}
-
-		// Gets the list etcd members known by this member.
-		currentMembers, err := etcdClient.Members(ctx)
-		if err != nil {
-			// NB. We should never be in here, given that we just received answer to the etcd calls included in forFirstAvailableNode;
-			// however, we are considering the calls to Members a signal of etcd not being stable.
-			conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Failed get answer from the etcd member on the %s node", node.Name)
-			continue
-		}

@@ -182,6 +165,33 @@
 	})
 }
 
+func (w *Workload) getCurrentEtcdMembers(ctx context.Context, machine *clusterv1.Machine, nodeName string) ([]*etcd.Member, error) {
+	// Create the etcd Client for the etcd Pod scheduled on the Node
+	etcdClient, err := w.etcdClientGenerator.forFirstAvailableNode(ctx, []string{nodeName})
+	if err != nil {
+		conditions.MarkUnknown(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to connect to the etcd pod on the %s node: %s", nodeName, err)
+		return nil, errors.Wrapf(err, "failed to get current etcd members: failed to connect to the etcd pod on the %s node", nodeName)
+	}
+	defer etcdClient.Close()
+
+	// While creating a new client, forFirstAvailableNode retrieves the status for the endpoint; check if the endpoint has errors.
+	if len(etcdClient.Errors) > 0 {
+		conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd member status reports errors: %s", strings.Join(etcdClient.Errors, ", "))
+		return nil, errors.Errorf("failed to get current etcd members: etcd member status reports errors: %s", strings.Join(etcdClient.Errors, ", "))
+	}
+
+	// Gets the list etcd members known by this member.
+	currentMembers, err := etcdClient.Members(ctx)
+	if err != nil {
+		// NB. We should never be in here, given that we just received answer to the etcd calls included in forFirstAvailableNode;
+		// however, we are considering the calls to Members a signal of etcd not being stable.
+		conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Failed get answer from the etcd member on the %s node", nodeName)
+		return nil, errors.Errorf("failed to get current etcd members: failed get answer from the etcd member on the %s node", nodeName)
+	}
+
+	return currentMembers, nil
+}
 
 func compareMachinesAndMembers(controlPlane *ControlPlane, members []*etcd.Member, kcpErrors []string) []string {
 	// NOTE: We run this check only if we actually know the list of members, otherwise the first for loop
 	// could generate a false negative when reporting missing etcd members.
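Extracting the loop body into getCurrentEtcdMembers (and, in the next file, reconcileEtcdMember) also resolves the pattern gocritic's deferInLoop check targets: a defer inside a for loop runs only when the enclosing function returns, so one etcd client per machine would have stayed open until the whole reconcile finished; inside a helper, the defer fires once per iteration. A self-contained sketch of the two shapes (hypothetical openClient stand-in):

package example

import (
	"io"
	"strings"
)

// openClient stands in for any per-iteration resource acquisition,
// such as creating an etcd client for a node.
func openClient(name string) (io.ReadCloser, error) {
	return io.NopCloser(strings.NewReader(name)), nil
}

// Leaky shape: every Close is deferred until processAll returns, so up
// to len(names) clients are open at once (gocritic: deferInLoop).
func processAll(names []string) {
	for _, name := range names {
		c, err := openClient(name)
		if err != nil {
			continue
		}
		defer c.Close() // runs only when processAll itself returns
		// ... use c ...
	}
}

// Fixed shape: the defer moves into a helper, so each client is closed
// before the next iteration begins.
func processAllFixed(names []string) {
	for _, name := range names {
		processOne(name)
	}
}

func processOne(name string) {
	c, err := openClient(name)
	if err != nil {
		return
	}
	defer c.Close() // runs at the end of this call, i.e. per iteration
	// ... use c ...
}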
77 changes: 43 additions & 34 deletions controlplane/kubeadm/internal/workload_cluster_etcd.go
@@ -37,50 +37,59 @@ type etcdClientFor interface {
 // ReconcileEtcdMembers iterates over all etcd members and finds members that do not have corresponding nodes.
 // If there are any such members, it deletes them from etcd and removes their nodes from the kubeadm configmap so that kubeadm does not run etcd health checks on them.
 func (w *Workload) ReconcileEtcdMembers(ctx context.Context, nodeNames []string, version semver.Version) ([]string, error) {
-	removedMembers := []string{}
-	errs := []error{}
+	allRemovedMembers := []string{}
+	allErrs := []error{}
 	for _, nodeName := range nodeNames {
-		// Create the etcd Client for the etcd Pod scheduled on the Node
-		etcdClient, err := w.etcdClientGenerator.forFirstAvailableNode(ctx, []string{nodeName})
-		if err != nil {
-			continue
-		}
-		defer etcdClient.Close()
-
-		members, err := etcdClient.Members(ctx)
-		if err != nil {
-			continue
-		}
+		removedMembers, errs := w.reconcileEtcdMember(ctx, nodeNames, nodeName, version)
+		allRemovedMembers = append(allRemovedMembers, removedMembers...)
+		allErrs = append(allErrs, errs...)
 	}
 
+	return allRemovedMembers, kerrors.NewAggregate(allErrs)
+}
+
+func (w *Workload) reconcileEtcdMember(ctx context.Context, nodeNames []string, nodeName string, version semver.Version) ([]string, []error) {
+	// Create the etcd Client for the etcd Pod scheduled on the Node
+	etcdClient, err := w.etcdClientGenerator.forFirstAvailableNode(ctx, []string{nodeName})
+	if err != nil {
+		return nil, nil
+	}
+	defer etcdClient.Close()
+
+	members, err := etcdClient.Members(ctx)
+	if err != nil {
+		return nil, nil
+	}
 
 	// Check if any member's node is missing from workload cluster
 	// If any, delete it with best effort
+	removedMembers := []string{}
+	errs := []error{}
 loopmembers:
 	for _, member := range members {
 		// If this member is just added, it has a empty name until the etcd pod starts. Ignore it.
 		if member.Name == "" {
 			continue
 		}
 
 		for _, nodeName := range nodeNames {
 			if member.Name == nodeName {
 				// We found the matching node, continue with the outer loop.
 				continue loopmembers
 			}
 		}
 
 		// If we're here, the node cannot be found.
 		removedMembers = append(removedMembers, member.Name)
 		if err := w.removeMemberForNode(ctx, member.Name); err != nil {
 			errs = append(errs, err)
 		}
 
 		if err := w.RemoveNodeFromKubeadmConfigMap(ctx, member.Name, version); err != nil {
 			errs = append(errs, err)
 		}
 	}
 
-	return removedMembers, kerrors.NewAggregate(errs)
+	return removedMembers, errs
 }

// UpdateEtcdVersionInKubeadmConfigMap sets the imageRepository or the imageTag or both in the kubeadm config map.
2 changes: 1 addition & 1 deletion internal/webhooks/cluster.go
@@ -81,7 +81,7 @@ func (webhook *Cluster) Default(ctx context.Context, obj runtime.Object) error {
 	clusterClass, err := webhook.getClusterClassForCluster(ctx, cluster)
 	if err != nil {
 		// Return early with errors if the ClusterClass can't be retrieved.
-		return apierrors.NewInternalError(errors.Wrapf(err, "Cluster %s can't be validated. ClusterClass %s can not be retrieved.", cluster.Name, cluster.Spec.Topology.Class))
+		return apierrors.NewInternalError(errors.Wrapf(err, "Cluster %s can't be validated. ClusterClass %s can not be retrieved", cluster.Name, cluster.Spec.Topology.Class))
 	}
 
 	// We gather all defaulting errors and return them together.
11 changes: 8 additions & 3 deletions internal/webhooks/cluster_test.go
@@ -46,7 +46,10 @@ func TestClusterDefaultNamespaces(t *testing.T) {
 		},
 	}
 	webhook := &Cluster{}
-	t.Run("for Cluster", customDefaultValidateTest(ctx, c, webhook))
+	// TODO(sbueringer) We are storing the func in testFunc temporarily to work around
+	// an issue in thelper: https://github.com/kulti/thelper/issues/31
+	testFunc := customDefaultValidateTest(ctx, c, webhook)
+	t.Run("for Cluster", testFunc)
 	g.Expect(webhook.Default(ctx, c)).To(Succeed())
 
 	g.Expect(c.Spec.InfrastructureRef.Namespace).To(Equal(c.Namespace))
@@ -348,8 +351,10 @@ func TestClusterDefaultTopologyVersion(t *testing.T) {
 
 	// Create the webhook and add the fakeClient as its client.
 	webhook := &Cluster{Client: fakeClient}
-
-	t.Run("for Cluster", customDefaultValidateTest(ctx, c, webhook))
+	// TODO(sbueringer) We are storing the func in testFunc temporarily to work around
+	// an issue in thelper: https://github.com/kulti/thelper/issues/31
+	testFunc := customDefaultValidateTest(ctx, c, webhook)
+	t.Run("for Cluster", testFunc)
 	g.Expect(webhook.Default(ctx, c)).To(Succeed())
 
 	g.Expect(c.Spec.Topology.Version).To(HavePrefix("v"))
5 changes: 4 additions & 1 deletion internal/webhooks/clusterclass_test.go
@@ -76,7 +76,10 @@ func TestClusterClassDefaultNamespaces(t *testing.T) {
 
 	// Create the webhook and add the fakeClient as its client.
 	webhook := &ClusterClass{Client: fakeClient}
-	t.Run("for ClusterClass", customDefaultValidateTest(ctx, in, webhook))
+	// TODO(sbueringer) We are storing the func in testFunc temporarily to work around
+	// an issue in thelper: https://github.com/kulti/thelper/issues/31
+	testFunc := customDefaultValidateTest(ctx, in, webhook)
+	t.Run("for ClusterClass", testFunc)
 
 	g := NewWithT(t)
 	g.Expect(webhook.Default(ctx, in)).To(Succeed())
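For background on the testFunc indirection above: thelper, one of the linters golangci-lint runs, reportedly raised a false positive at the time when the result of a function call was passed directly as t.Run's second argument (the linked kulti/thelper issue), hence the temporary variable. A generic sketch of the workaround shape (hypothetical makeTest factory):

package example

import "testing"

// makeTest builds a subtest function, mirroring how customDefaultValidateTest
// is used in the tests above.
func makeTest(want int) func(*testing.T) {
	return func(t *testing.T) {
		if got := 40 + 2; got != want {
			t.Fatalf("got %d, want %d", got, want)
		}
	}
}

func TestAnswer(t *testing.T) {
	// t.Run("answer", makeTest(42)) tripped the false positive, so the
	// call result is stored in a variable before being passed to t.Run.
	testFunc := makeTest(42)
	t.Run("answer", testFunc)
}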

