diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 000000000000..80c5eca26e46
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,21 @@
+linters:
+  enable:
+  - govet
+  - gofmt
+  - structcheck
+  - varcheck
+  - interfacer
+  - unconvert
+  - ineffassign
+  - goconst
+  - gocyclo
+  - misspell
+  - nakedret
+  - prealloc
+  - gosec
+  disable-all: true
+  # Run with --fast=false for more extensive checks
+  fast: true
+issues:
+  max-same-issues: 0
+  max-per-linter: 0
diff --git a/BUILD.bazel b/BUILD.bazel
index 8f39d36c9411..0df1f92d7c7c 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -1,7 +1,20 @@
 load("@bazel_gazelle//:def.bzl", "gazelle")
+load("//build:run_in_workspace_with_goroot.bzl", "workspace_binary")
 
 # gazelle:prefix sigs.k8s.io/cluster-api
 gazelle(
     name = "gazelle",
     external = "vendored",
 )
+
+workspace_binary(
+    name = "lint",
+    args = ["run"],
+    cmd = "@com_github_golangci_golangci-lint//cmd/golangci-lint",
+)
+
+workspace_binary(
+    name = "lint-full",
+    args = ["run --fast=false"],
+    cmd = "@com_github_golangci_golangci-lint//cmd/golangci-lint",
+)
diff --git a/Makefile b/Makefile
index d48df044fde2..02fd53724be7 100644
--- a/Makefile
+++ b/Makefile
@@ -72,6 +72,13 @@ fmt: ## Run go fmt against code
 vet: ## Run go vet against code
 	go vet ./pkg/... ./cmd/...
 
+.PHONY: lint
+lint: dep-ensure ## Lint codebase
+	bazel run //:lint $(BAZEL_ARGS)
+
+lint-full: dep-ensure ## Run slower linters to detect possible issues
+	bazel run //:lint-full $(BAZEL_ARGS)
+
 .PHONY: generate
 generate: clientset dep-ensure ## Generate code
 	go generate ./pkg/... ./cmd/...
@@ -92,6 +99,10 @@ clientset: ## Generate a typed clientset
 		--output-package sigs.k8s.io/cluster-api/pkg/client/informers_generated \
 		--go-header-file=./hack/boilerplate.go.txt
 
+.PHONY: clean
+clean: ## Remove all generated files
+	rm -f bazel-*
+
 .PHONY: docker-build
 docker-build: generate fmt vet manifests ## Build the docker image
 	docker build . -t ${IMG}
diff --git a/WORKSPACE b/WORKSPACE
index 27dbf6850ca1..9f888efc948a 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -1,4 +1,5 @@
 load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
 
 http_archive(
     name = "io_bazel_rules_go",
@@ -29,3 +30,11 @@ go_repository(
     commit = "58492e2d83c59ed63881311f46ad6251f77dabc3",
     importpath = "sigs.k8s.io/kustomize",
 )
+
+go_repository(
+    name = "com_github_golangci_golangci-lint",
+    build_file_generation = "on",
+    importpath = "github.com/golangci/golangci-lint",
+    strip_prefix = "golangci-lint-4570c043a9b56ccf0372cb6ba7a8b645d92b3357",
+    urls = ["https://github.com/golangci/golangci-lint/archive/4570c043a9b56ccf0372cb6ba7a8b645d92b3357.zip"],
+)
diff --git a/build/BUILD b/build/BUILD
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/build/run_in_workspace_with_goroot.bzl b/build/run_in_workspace_with_goroot.bzl
new file mode 100644
index 000000000000..5280630f5c65
--- /dev/null
+++ b/build/run_in_workspace_with_goroot.bzl
@@ -0,0 +1,103 @@
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This is a modified version of the same rule from kubernetes/repo-infra
+# modified to add the GO SDK to the PATH environment variable.
+
+# Writes out a script which saves the runfiles directory,
+# changes to the workspace root, and then runs a command.
+def _workspace_binary_script_impl(ctx):
+    content = """#!/usr/bin/env bash
+set -o errexit
+set -o nounset
+set -o pipefail
+GOBINDIR=$(dirname $(readlink {go_bin}))
+export PATH=$GOBINDIR:$PATH
+BASE=$(pwd)
+cd $(dirname $(readlink {root_file}))
+"$BASE/{cmd}" $@
+""".format(
+        cmd = ctx.file.cmd.short_path,
+        root_file = ctx.file.root_file.short_path,
+        go_bin = ctx.file.go_bin.short_path,
+    )
+    ctx.actions.write(
+        output = ctx.outputs.executable,
+        content = content,
+        is_executable = True,
+    )
+    runfiles = ctx.runfiles(
+        files = [
+            ctx.file.cmd,
+            ctx.file.go_bin,
+            ctx.file.root_file,
+        ],
+    )
+    return [DefaultInfo(runfiles = runfiles)]
+
+_workspace_binary_script = rule(
+    attrs = {
+        "cmd": attr.label(
+            mandatory = True,
+            allow_files = True,
+            single_file = True,
+        ),
+        "root_file": attr.label(
+            mandatory = True,
+            allow_files = True,
+            single_file = True,
+        ),
+        "go_bin": attr.label(
+            mandatory = True,
+            allow_files = True,
+            single_file = True,
+        ),
+    },
+    executable = True,
+    implementation = _workspace_binary_script_impl,
+)
+
+# Wraps a binary to be run in the workspace root via bazel run.
+#
+# For example, one might do something like
+#
+# workspace_binary(
+#     name = "dep",
+#     cmd = "//vendor/github.com/golang/dep/cmd/dep",
+# )
+#
+# which would allow running dep with bazel run.
+def workspace_binary(
+        name,
+        cmd,
+        args = None,
+        visibility = None,
+        go_bin = "@go_sdk//:bin/go",
+        root_file = "//:WORKSPACE"):
+    script_name = name + "_script"
+    _workspace_binary_script(
+        name = script_name,
+        cmd = cmd,
+        root_file = root_file,
+        go_bin = go_bin,
+        tags = ["manual"],
+    )
+    native.sh_binary(
+        name = name,
+        srcs = [":" + script_name],
+        args = args,
+        visibility = visibility,
+        tags = ["manual"],
+    )
diff --git a/cmd/clusterctl/clientcmd/configutil.go b/cmd/clusterctl/clientcmd/configutil.go
index faef692ed851..9af03d014974 100644
--- a/cmd/clusterctl/clientcmd/configutil.go
+++ b/cmd/clusterctl/clientcmd/configutil.go
@@ -42,10 +42,10 @@ func NewCoreClientSetForDefaultSearchPath(kubeconfigPath string, overrides clien
 	return kubernetes.NewForConfig(config)
 }
 
-// NewClusterApiClientForDefaultSearchPath creates a Cluster API clientset. If the kubeconfigPath is specified then the configuration is loaded from that path.
+// NewClusterAPIClientForDefaultSearchPath creates a Cluster API clientset. If the kubeconfigPath is specified then the configuration is loaded from that path.
 // Otherwise the default kubeconfig search path is used.
 // The overrides parameter is used to select a specific context of the config, for example, select the context with a given cluster name or namespace.
-func NewClusterApiClientForDefaultSearchPath(kubeconfigPath string, overrides clientcmd.ConfigOverrides) (*clientset.Clientset, error) {
+func NewClusterAPIClientForDefaultSearchPath(kubeconfigPath string, overrides clientcmd.ConfigOverrides) (*clientset.Clientset, error) {
 	config, err := newRestConfigForDefaultSearchPath(kubeconfigPath, overrides)
 	if err != nil {
 		return nil, err
@@ -72,16 +72,16 @@ func newRestConfigForDefaultSearchPath(kubeconfigPath string, overrides clientcm
 			return config, nil
 		}
 	}
-	apiConfig, err := newApiConfigForDefaultSearchPath(kubeconfigPath)
+	apiConfig, err := newAPIConfigForDefaultSearchPath(kubeconfigPath)
 	if err != nil {
 		return nil, err
 	}
 	return newRestConfig(apiConfig, overrides)
 }
 
-// newApiConfigForDefaultSearchPath creates an api.Config by searching for the kubeconfig on the default search path. If an override 'kubeconfigPath' is
+// newAPIConfigForDefaultSearchPath creates an api.Config by searching for the kubeconfig on the default search path. If an override 'kubeconfigPath' is
 // given then that path is used instead of the default path.
-func newApiConfigForDefaultSearchPath(kubeconfigPath string) (*api.Config, error) {
+func newAPIConfigForDefaultSearchPath(kubeconfigPath string) (*api.Config, error) {
 	configLoader := clientcmd.NewDefaultClientConfigLoadingRules()
 	configLoader.ExplicitPath = kubeconfigPath
 	return configLoader.Load()
diff --git a/cmd/clusterctl/clusterdeployer/bootstrap/existing/existingcluster_test.go b/cmd/clusterctl/clusterdeployer/bootstrap/existing/existingcluster_test.go
index a6afff09d8ca..966f5841bbd0 100644
--- a/cmd/clusterctl/clusterdeployer/bootstrap/existing/existingcluster_test.go
+++ b/cmd/clusterctl/clusterdeployer/bootstrap/existing/existingcluster_test.go
@@ -61,6 +61,8 @@ func createTempFile(contents string) (string, error) {
 		return "", err
 	}
 	defer f.Close()
-	f.WriteString(contents)
+	if _, err := f.WriteString(contents); err != nil {
+		return "", err
+	}
 	return f.Name(), nil
 }
diff --git a/cmd/clusterctl/clusterdeployer/bootstrap/kind/BUILD.bazel b/cmd/clusterctl/clusterdeployer/bootstrap/kind/BUILD.bazel
index fb52941e4f13..d38c05ba34b3 100644
--- a/cmd/clusterctl/clusterdeployer/bootstrap/kind/BUILD.bazel
+++ b/cmd/clusterctl/clusterdeployer/bootstrap/kind/BUILD.bazel
@@ -5,11 +5,15 @@ go_library(
     srcs = ["kind.go"],
    importpath = "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/kind",
     visibility = ["//visibility:public"],
-    deps = ["//vendor/k8s.io/klog:go_default_library"],
+    deps = [
+        "//vendor/github.com/pkg/errors:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
+    ],
 )
 
 go_test(
     name = "go_default_test",
     srcs = ["kind_test.go"],
     embed = [":go_default_library"],
+    deps = ["//vendor/github.com/pkg/errors:go_default_library"],
 )
diff --git a/cmd/clusterctl/clusterdeployer/bootstrap/kind/kind.go b/cmd/clusterctl/clusterdeployer/bootstrap/kind/kind.go
index 90ea4c8701fe..db58fe47509b 100644
--- a/cmd/clusterctl/clusterdeployer/bootstrap/kind/kind.go
+++ b/cmd/clusterctl/clusterdeployer/bootstrap/kind/kind.go
@@ -27,8 +27,7 @@ import (
 )
 
 const (
-	kubeconfigEnvVar = "KUBECONFIG"
-	kindClusterName  = "clusterapi"
+	kindClusterName = "clusterapi"
 )
 
 type Kind struct {
diff --git a/cmd/clusterctl/clusterdeployer/bootstrap/kind/kind_test.go b/cmd/clusterctl/clusterdeployer/bootstrap/kind/kind_test.go
index 6d2e4c66fa4e..fc452f8563b9 100644
--- a/cmd/clusterctl/clusterdeployer/bootstrap/kind/kind_test.go
+++ b/cmd/clusterctl/clusterdeployer/bootstrap/kind/kind_test.go
@@ -121,6 +121,8 @@ func createTempFile(contents string) (string, error) {
 		return "", err
 	}
 	defer f.Close()
-	f.WriteString(contents)
+	if _, err := f.WriteString(contents); err != nil {
+		return "", err
+	}
 	return f.Name(), nil
 }
diff --git a/cmd/clusterctl/clusterdeployer/bootstrap/minikube/minikube_test.go b/cmd/clusterctl/clusterdeployer/bootstrap/minikube/minikube_test.go
index 9cd1daef0053..1670f73fc4ec 100644
--- a/cmd/clusterctl/clusterdeployer/bootstrap/minikube/minikube_test.go
+++ b/cmd/clusterctl/clusterdeployer/bootstrap/minikube/minikube_test.go
@@ -147,6 +147,8 @@ func createTempFile(contents string) (string, error) {
 		return "", err
 	}
 	defer f.Close()
-	f.WriteString(contents)
+	if _, err := f.WriteString(contents); err != nil {
+		return "", err
+	}
 	return f.Name(), nil
 }
diff --git a/cmd/clusterctl/clusterdeployer/bootstrap/provisioner.go b/cmd/clusterctl/clusterdeployer/bootstrap/provisioner.go
index 09b266f25a7f..2f11376ceb35 100644
--- a/cmd/clusterctl/clusterdeployer/bootstrap/provisioner.go
+++ b/cmd/clusterctl/clusterdeployer/bootstrap/provisioner.go
@@ -41,6 +41,6 @@ func Get(o Options) (ClusterProvisioner, error) {
 			return existing.NewExistingCluster(o.KubeConfig)
 		}
 
-		return nil, errors.New("no bootstrap provisioner specified, you can specify `--bootstrap-cluster-kubeconfig` to use an existing Kubernetes cluster or `--bootstrap-type` to use a built-in ephemeral cluster.")
+		return nil, errors.New("no bootstrap provisioner specified, you can specify `--bootstrap-cluster-kubeconfig` to use an existing Kubernetes cluster or `--bootstrap-type` to use a built-in ephemeral cluster")
 	}
 }
diff --git a/cmd/clusterctl/clusterdeployer/clusterclient/clusterclient.go b/cmd/clusterctl/clusterdeployer/clusterclient/clusterclient.go
index 8e5d5c69f273..10825621f1ad 100644
--- a/cmd/clusterctl/clusterdeployer/clusterclient/clusterclient.go
+++ b/cmd/clusterctl/clusterdeployer/clusterclient/clusterclient.go
@@ -143,7 +143,7 @@ func (c *client) DeleteNamespace(namespaceName string) error {
 // NewFromDefaultSearchPath creates and returns a Client. The kubeconfigFile argument is expected to be the path to a
 // valid kubeconfig file.
 func NewFromDefaultSearchPath(kubeconfigFile string, overrides tcmd.ConfigOverrides) (*client, error) {
-	c, err := clientcmd.NewClusterApiClientForDefaultSearchPath(kubeconfigFile, overrides)
+	c, err := clientcmd.NewClusterAPIClientForDefaultSearchPath(kubeconfigFile, overrides)
 	if err != nil {
 		return nil, err
 	}
diff --git a/cmd/clusterctl/clusterdeployer/clusterdeployer_test.go b/cmd/clusterctl/clusterdeployer/clusterdeployer_test.go
index cb40fbee0196..b32d7c4075e8 100644
--- a/cmd/clusterctl/clusterdeployer/clusterdeployer_test.go
+++ b/cmd/clusterctl/clusterdeployer/clusterdeployer_test.go
@@ -301,7 +301,7 @@ func (c *testClusterClient) DeleteNamespace(namespaceName string) error {
 		return nil
 	}
 
-	var ns []string
+	ns := make([]string, 0, len(c.namespaces))
 	for _, n := range c.namespaces {
 		if n == namespaceName {
 			continue
@@ -1210,7 +1210,7 @@ func generateTestNodeMachine(name string) *clusterv1.Machine {
 }
 
 func generateTestControlPlaneMachines(controlPlaneNames []string) []*clusterv1.Machine {
-	var controlPlanes []*clusterv1.Machine
+	controlPlanes := make([]*clusterv1.Machine, 0, len(controlPlaneNames))
 	for _, mn := range controlPlaneNames {
 		controlPlanes = append(controlPlanes, generateTestControlPlaneMachine(mn))
 	}
@@ -1218,7 +1218,7 @@ func generateTestControlPlaneMachines(controlPlaneNames []string) []*clusterv1.M
 }
 
 func generateTestNodeMachines(nodeNames []string) []*clusterv1.Machine {
-	var nodes []*clusterv1.Machine
+	nodes := make([]*clusterv1.Machine, 0, len(nodeNames))
 	for _, nn := range nodeNames {
 		nodes = append(nodes, generateTestNodeMachine(nn))
 	}
@@ -1247,8 +1247,8 @@ func generateMachines() []*clusterv1.Machine {
 
 func newMachineSetsFixture() []*clusterv1.MachineSet {
 	return []*clusterv1.MachineSet{
-		&clusterv1.MachineSet{ObjectMeta: metav1.ObjectMeta{Name: "machine-set-name-1"}},
-		&clusterv1.MachineSet{ObjectMeta: metav1.ObjectMeta{Name: "machine-set-name-2"}},
+		{ObjectMeta: metav1.ObjectMeta{Name: "machine-set-name-1"}},
+		{ObjectMeta: metav1.ObjectMeta{Name: "machine-set-name-2"}},
 	}
 }
 
@@ -1267,15 +1267,15 @@ func getClustersForNamespace(namespace string, count int) []*clusterv1.Cluster {
 
 func newMachineDeploymentsFixture() []*clusterv1.MachineDeployment {
 	return []*clusterv1.MachineDeployment{
-		&clusterv1.MachineDeployment{ObjectMeta: metav1.ObjectMeta{Name: "machine-deployment-name-1"}},
-		&clusterv1.MachineDeployment{ObjectMeta: metav1.ObjectMeta{Name: "machine-deployment-name-2"}},
+		{ObjectMeta: metav1.ObjectMeta{Name: "machine-deployment-name-1"}},
+		{ObjectMeta: metav1.ObjectMeta{Name: "machine-deployment-name-2"}},
 	}
 }
 
 func newClustersFixture() []*clusterv1.Cluster {
 	return []*clusterv1.Cluster{
-		&clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster-name-1"}},
-		&clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster-name-2"}},
+		{ObjectMeta: metav1.ObjectMeta{Name: "cluster-name-1"}},
+		{ObjectMeta: metav1.ObjectMeta{Name: "cluster-name-2"}},
 	}
 }
 
diff --git a/cmd/clusterctl/validation/validate_cluster_api_objects.go b/cmd/clusterctl/validation/validate_cluster_api_objects.go
index eecf84a6b2b4..2a36a73291d0 100644
--- a/cmd/clusterctl/validation/validate_cluster_api_objects.go
+++ b/cmd/clusterctl/validation/validate_cluster_api_objects.go
@@ -50,7 +50,7 @@ func ValidateClusterAPIObjects(w io.Writer, c client.Client, clusterName string,
 	return validateMachineObjects(w, machines, c)
 }
 
-func getClusterObject(c client.Client, clusterName string, namespace string) (*v1alpha1.Cluster, error) {
+func getClusterObject(c client.Reader, clusterName string, namespace string) (*v1alpha1.Cluster, error) {
 	if clusterName != "" {
 		cluster := &clusterv1alpha1.Cluster{}
 		err := c.Get(context.TODO(), types.NamespacedName{Name: clusterName, Namespace: namespace}, cluster)
@@ -97,7 +97,7 @@ func validateMachineObjects(w io.Writer, machines *v1alpha1.MachineList, client
 func validateMachineObject(w io.Writer, machine v1alpha1.Machine, client client.Client) bool {
 	fmt.Fprintf(w, "Checking machine object %q... ", machine.Name)
 	if machine.Status.ErrorReason != nil || machine.Status.ErrorMessage != nil {
-		var reason common.MachineStatusError = ""
+		var reason common.MachineStatusError
 		if machine.Status.ErrorReason != nil {
 			reason = *machine.Status.ErrorReason
 		}
diff --git a/pkg/controller/machinedeployment/controller.go b/pkg/controller/machinedeployment/controller.go
index 8d1f90b6aaf7..1109b8ad1468 100644
--- a/pkg/controller/machinedeployment/controller.go
+++ b/pkg/controller/machinedeployment/controller.go
@@ -112,8 +112,8 @@ func (r *ReconcileMachineDeployment) getMachineSetsForDeployment(d *v1alpha1.Mac
 
 	// TODO: flush out machine set adoption.
 
-	var filteredMS []*v1alpha1.MachineSet
-	for idx, _ := range machineSets.Items {
+	filteredMS := make([]*v1alpha1.MachineSet, 0, len(machineSets.Items))
+	for idx := range machineSets.Items {
 		ms := &machineSets.Items[idx]
 		if metav1.GetControllerOf(ms) == nil || (metav1.GetControllerOf(ms) != nil && !metav1.IsControlledBy(ms, d)) {
 			klog.V(4).Infof("%s not controlled by %v", ms.Name, d.Name)
@@ -220,7 +220,7 @@ func (r *ReconcileMachineDeployment) getMachineDeploymentsForMachineSet(ms *v1al
 		return nil
 	}
 
-	var deployments []*v1alpha1.MachineDeployment
+	deployments := make([]*v1alpha1.MachineDeployment, 0, len(dList.Items))
 	for idx, d := range dList.Items {
 		selector, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector)
 		if err != nil {
diff --git a/pkg/controller/machinedeployment/rolling.go b/pkg/controller/machinedeployment/rolling.go
index 45e650e14232..0f0e821cfb94 100644
--- a/pkg/controller/machinedeployment/rolling.go
+++ b/pkg/controller/machinedeployment/rolling.go
@@ -247,7 +247,7 @@ func (r *ReconcileMachineDeployment) scaleDownOldMachineSetsForRollingUpdate(all
 			continue
 		}
 		// Scale down.
-		scaleDownCount := int32(integer.Int32Min(*(targetMS.Spec.Replicas), totalScaleDownCount-totalScaledDown))
+		scaleDownCount := integer.Int32Min(*(targetMS.Spec.Replicas), totalScaleDownCount-totalScaledDown)
 		newReplicasCount := *(targetMS.Spec.Replicas) - scaleDownCount
 		if newReplicasCount > *(targetMS.Spec.Replicas) {
 			return totalScaledDown, errors.Errorf("when scaling down old MS, got invalid request to scale down %s/%s %d -> %d", targetMS.Namespace, targetMS.Name, *(targetMS.Spec.Replicas), newReplicasCount)
diff --git a/pkg/controller/machinedeployment/sync.go b/pkg/controller/machinedeployment/sync.go
index c6a144563070..e5944c5735ca 100644
--- a/pkg/controller/machinedeployment/sync.go
+++ b/pkg/controller/machinedeployment/sync.go
@@ -182,7 +182,6 @@ func (r *ReconcileMachineDeployment) getNewMachineSet(d *clusterv1alpha1.Machine
 		controllerRef := metav1.GetControllerOf(ms)
 		if controllerRef != nil && controllerRef.UID == d.UID && dutil.EqualIgnoreHash(&d.Spec.Template, &ms.Spec.Template) {
 			createdMS = ms
-			err = nil
 			break
 		}
 	}
diff --git a/pkg/controller/machinedeployment/util/util.go b/pkg/controller/machinedeployment/util/util.go
index b06fbbbd14ac..07eebd0d0bfd 100644
--- a/pkg/controller/machinedeployment/util/util.go
+++ b/pkg/controller/machinedeployment/util/util.go
@@ -429,7 +429,7 @@ func FindNewMachineSet(deployment *v1alpha1.MachineDeployment, msList []*v1alpha
 // - the second contains all old machine sets
 func FindOldMachineSets(deployment *v1alpha1.MachineDeployment, msList []*v1alpha1.MachineSet) ([]*v1alpha1.MachineSet, []*v1alpha1.MachineSet) {
 	var requiredMSs []*v1alpha1.MachineSet
-	var allMSs []*v1alpha1.MachineSet
+	allMSs := make([]*v1alpha1.MachineSet, 0, len(msList))
 	newMS := FindNewMachineSet(deployment, msList)
 	for _, ms := range msList {
 		// Filter out new machine set
@@ -524,7 +524,7 @@ func NewMSNewReplicas(deployment *v1alpha1.MachineDeployment, allMSs []*v1alpha1
 		// Scale up.
 		scaleUpCount := maxTotalMachines - currentMachineCount
 		// Do not exceed the number of desired replicas.
-		scaleUpCount = int32(integer.Int32Min(scaleUpCount, *(deployment.Spec.Replicas)-*(newMS.Spec.Replicas)))
+		scaleUpCount = integer.Int32Min(scaleUpCount, *(deployment.Spec.Replicas)-*(newMS.Spec.Replicas))
 		return *(newMS.Spec.Replicas) + scaleUpCount, nil
 	default:
 		// Check if we can scale up.
@@ -542,7 +542,7 @@ func NewMSNewReplicas(deployment *v1alpha1.MachineDeployment, allMSs []*v1alpha1
 		// Scale up.
 		scaleUpCount := maxTotalMachines - currentMachineCount
 		// Do not exceed the number of desired replicas.
-		scaleUpCount = int32(integer.Int32Min(scaleUpCount, *(deployment.Spec.Replicas)-*(newMS.Spec.Replicas)))
+		scaleUpCount = integer.Int32Min(scaleUpCount, *(deployment.Spec.Replicas)-*(newMS.Spec.Replicas))
 		return *(newMS.Spec.Replicas) + scaleUpCount, nil
 		// -- return 0, errors.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type)
 	}
diff --git a/pkg/controller/machineset/controller.go b/pkg/controller/machineset/controller.go
index 3ee99b58b083..176129b244e1 100644
--- a/pkg/controller/machineset/controller.go
+++ b/pkg/controller/machineset/controller.go
@@ -159,7 +159,7 @@ func (r *ReconcileMachineSet) Reconcile(request reconcile.Request) (reconcile.Re
 	}
 
 	// Filter out irrelevant machines (deleting/mismatch labels) and claim orphaned machines.
-	var filteredMachines []*clusterv1alpha1.Machine
+	filteredMachines := make([]*clusterv1alpha1.Machine, 0, len(allMachines.Items))
 	for idx := range allMachines.Items {
 		machine := &allMachines.Items[idx]
 		if shouldExcludeMachine(machineSet, machine) {
@@ -212,7 +212,7 @@ func (r *ReconcileMachineSet) Reconcile(request reconcile.Request) (reconcile.Re
 }
 
 // syncReplicas essentially scales machine resources up and down.
-func (c *ReconcileMachineSet) syncReplicas(ms *clusterv1alpha1.MachineSet, machines []*clusterv1alpha1.Machine) error {
+func (r *ReconcileMachineSet) syncReplicas(ms *clusterv1alpha1.MachineSet, machines []*clusterv1alpha1.Machine) error {
 	if ms.Spec.Replicas == nil {
 		return errors.Errorf("the Replicas field in Spec for machineset %v is nil, this should not be allowed", ms.Name)
 	}
@@ -226,8 +226,8 @@ func (c *ReconcileMachineSet) syncReplicas(ms *clusterv1alpha1.MachineSet, machi
 		var errstrings []string
 		for i := 0; i < diff; i++ {
 			klog.Infof("creating machine %d of %d, ( spec.replicas(%d) > currentMachineCount(%d) )", i+1, diff, *(ms.Spec.Replicas), len(machines))
-			machine := c.createMachine(ms)
-			err := c.Client.Create(context.Background(), machine)
+			machine := r.createMachine(ms)
+			err := r.Client.Create(context.Background(), machine)
 			if err != nil {
 				klog.Errorf("unable to create a machine = %s, due to %v", machine.Name, err)
 				errstrings = append(errstrings, err.Error())
@@ -239,7 +239,7 @@ func (c *ReconcileMachineSet) syncReplicas(ms *clusterv1alpha1.MachineSet, machi
 		if len(errstrings) > 0 {
 			return errors.New(strings.Join(errstrings, "; "))
 		}
-		return c.waitForMachineCreation(machineList)
+		return r.waitForMachineCreation(machineList)
 	} else if diff > 0 {
 		klog.Infof("Too many replicas for %v %s/%s, need %d, deleting %d",
 			controllerKind, ms.Namespace, ms.Name, *(ms.Spec.Replicas), diff)
@@ -253,7 +253,7 @@ func (c *ReconcileMachineSet) syncReplicas(ms *clusterv1alpha1.MachineSet, machi
 		for _, machine := range machinesToDelete {
 			go func(targetMachine *clusterv1alpha1.Machine) {
 				defer wg.Done()
-				err := c.Client.Delete(context.Background(), targetMachine)
+				err := r.Client.Delete(context.Background(), targetMachine)
 				if err != nil {
 					klog.Errorf("unable to delete a machine = %s, due to %v", targetMachine.Name, err)
 					errCh <- err
@@ -270,7 +270,7 @@ func (c *ReconcileMachineSet) syncReplicas(ms *clusterv1alpha1.MachineSet, machi
 			}
 		default:
 		}
-		return c.waitForMachineDeletion(machinesToDelete)
+		return r.waitForMachineDeletion(machinesToDelete)
 	}
 
 	return nil
@@ -278,7 +278,7 @@ func (c *ReconcileMachineSet) syncReplicas(ms *clusterv1alpha1.MachineSet, machi
 
 // createMachine creates a machine resource.
 // the name of the newly created resource is going to be created by the API server, we set the generateName field
-func (c *ReconcileMachineSet) createMachine(machineSet *clusterv1alpha1.MachineSet) *clusterv1alpha1.Machine {
+func (r *ReconcileMachineSet) createMachine(machineSet *clusterv1alpha1.MachineSet) *clusterv1alpha1.Machine {
 	gv := clusterv1alpha1.SchemeGroupVersion
 	machine := &clusterv1alpha1.Machine{
 		TypeMeta: metav1.TypeMeta{
@@ -311,7 +311,7 @@ func shouldExcludeMachine(machineSet *clusterv1alpha1.MachineSet, machine *clust
 	return false
 }
 
-func (c *ReconcileMachineSet) adoptOrphan(machineSet *clusterv1alpha1.MachineSet, machine *clusterv1alpha1.Machine) error {
+func (r *ReconcileMachineSet) adoptOrphan(machineSet *clusterv1alpha1.MachineSet, machine *clusterv1alpha1.Machine) error {
 	// Add controller reference.
 	ownerRefs := machine.ObjectMeta.GetOwnerReferences()
 	if ownerRefs == nil {
@@ -321,17 +321,17 @@ func (c *ReconcileMachineSet) adoptOrphan(machineSet *clusterv1alpha1.MachineSet
 	newRef := *metav1.NewControllerRef(machineSet, controllerKind)
 	ownerRefs = append(ownerRefs, newRef)
 	machine.ObjectMeta.SetOwnerReferences(ownerRefs)
-	if err := c.Client.Update(context.Background(), machine); err != nil {
+	if err := r.Client.Update(context.Background(), machine); err != nil {
 		klog.Warningf("Failed to update machine owner reference. %v", err)
 		return err
 	}
 	return nil
 }
 
-func (c *ReconcileMachineSet) waitForMachineCreation(machineList []*clusterv1alpha1.Machine) error {
+func (r *ReconcileMachineSet) waitForMachineCreation(machineList []*clusterv1alpha1.Machine) error {
 	for _, machine := range machineList {
 		pollErr := util.PollImmediate(stateConfirmationInterval, stateConfirmationTimeout, func() (bool, error) {
-			err := c.Client.Get(context.Background(),
+			err := r.Client.Get(context.Background(),
 				client.ObjectKey{Namespace: machine.Namespace, Name: machine.Name},
 				&clusterv1alpha1.Machine{})
 			if err == nil {
@@ -351,11 +351,11 @@ func (c *ReconcileMachineSet) waitForMachineCreation(machineList []*clusterv1alp
 	return nil
 }
 
-func (c *ReconcileMachineSet) waitForMachineDeletion(machineList []*clusterv1alpha1.Machine) error {
+func (r *ReconcileMachineSet) waitForMachineDeletion(machineList []*clusterv1alpha1.Machine) error {
 	for _, machine := range machineList {
 		pollErr := util.PollImmediate(stateConfirmationInterval, stateConfirmationTimeout, func() (bool, error) {
 			m := &clusterv1alpha1.Machine{}
-			err := c.Client.Get(context.Background(),
+			err := r.Client.Get(context.Background(),
 				client.ObjectKey{Namespace: machine.Namespace, Name: machine.Name}, m)
 
 			if apierrors.IsNotFound(err) || !m.DeletionTimestamp.IsZero() {
diff --git a/pkg/controller/noderefutil/util_test.go b/pkg/controller/noderefutil/util_test.go
index 46fc263b8db5..df8ba33c63d2 100644
--- a/pkg/controller/noderefutil/util_test.go
+++ b/pkg/controller/noderefutil/util_test.go
@@ -50,7 +50,7 @@ func TestIsNodeAvaialble(t *testing.T) {
 			name: "no ready condition",
 			node: &corev1.Node{Status: corev1.NodeStatus{
 				Conditions: []corev1.NodeCondition{
-					corev1.NodeCondition{
+					{
 						Type:   corev1.NodeOutOfDisk,
 						Status: corev1.ConditionTrue,
 					},
@@ -62,7 +62,7 @@ func TestIsNodeAvaialble(t *testing.T) {
 			name: "ready condition true, minReadySeconds = 0, lastTransitionTime now",
 			node: &corev1.Node{Status: corev1.NodeStatus{
 				Conditions: []corev1.NodeCondition{
-					corev1.NodeCondition{
+					{
 						Type:               corev1.NodeReady,
 						Status:             corev1.ConditionTrue,
 						LastTransitionTime: metav1.Now(),
@@ -75,7 +75,7 @@ func TestIsNodeAvaialble(t *testing.T) {
 			name: "ready condition true, minReadySeconds = 0, lastTransitionTime past",
 			node: &corev1.Node{Status: corev1.NodeStatus{
 				Conditions: []corev1.NodeCondition{
-					corev1.NodeCondition{
+					{
 						Type:               corev1.NodeReady,
 						Status:             corev1.ConditionTrue,
 						LastTransitionTime: metav1.Time{Time: time.Now().Add(time.Duration(-700) * time.Second)},
@@ -88,7 +88,7 @@ func TestIsNodeAvaialble(t *testing.T) {
 			name: "ready condition true, minReadySeconds = 300, lastTransitionTime now",
 			node: &corev1.Node{Status: corev1.NodeStatus{
 				Conditions: []corev1.NodeCondition{
-					corev1.NodeCondition{
+					{
 						Type:               corev1.NodeReady,
 						Status:             corev1.ConditionTrue,
 						LastTransitionTime: metav1.Now(),
@@ -102,7 +102,7 @@ func TestIsNodeAvaialble(t *testing.T) {
 			name: "ready condition true, minReadySeconds = 300, lastTransitionTime past",
 			node: &corev1.Node{Status: corev1.NodeStatus{
 				Conditions: []corev1.NodeCondition{
-					corev1.NodeCondition{
+					{
 						Type:               corev1.NodeReady,
 						Status:             corev1.ConditionTrue,
 						LastTransitionTime: metav1.Time{Time: time.Now().Add(time.Duration(-700) * time.Second)},
@@ -116,7 +116,7 @@ func TestIsNodeAvaialble(t *testing.T) {
 			name: "ready condition false",
 			node: &corev1.Node{Status: corev1.NodeStatus{
 				Conditions: []corev1.NodeCondition{
-					corev1.NodeCondition{
+					{
 						Type:   corev1.NodeReady,
 						Status: corev1.ConditionFalse,
 					},
@@ -128,7 +128,7 @@ func TestIsNodeAvaialble(t *testing.T) {
 			name: "ready condition unknown",
 			node: &corev1.Node{Status: corev1.NodeStatus{
 				Conditions: []corev1.NodeCondition{
-					corev1.NodeCondition{
+					{
 						Type:   corev1.NodeReady,
 						Status: corev1.ConditionUnknown,
 					},
@@ -165,7 +165,7 @@ func TestGetReadyCondition(t *testing.T) {
 			name: "no ready condition",
 			nodeStatus: &corev1.NodeStatus{
 				Conditions: []corev1.NodeCondition{
-					corev1.NodeCondition{
+					{
 						Type:   corev1.NodeOutOfDisk,
 						Status: corev1.ConditionTrue,
 					},
@@ -176,7 +176,7 @@ func TestGetReadyCondition(t *testing.T) {
 			name: "ready condition true",
 			nodeStatus: &corev1.NodeStatus{
 				Conditions: []corev1.NodeCondition{
-					corev1.NodeCondition{
+					{
 						Type:   corev1.NodeReady,
 						Status: corev1.ConditionTrue,
 					},
@@ -191,7 +191,7 @@ func TestGetReadyCondition(t *testing.T) {
 			name: "ready condition false",
 			nodeStatus: &corev1.NodeStatus{
 				Conditions: []corev1.NodeCondition{
-					corev1.NodeCondition{
+					{
 						Type:   corev1.NodeReady,
 						Status: corev1.ConditionFalse,
 					},
@@ -206,7 +206,7 @@ func TestGetReadyCondition(t *testing.T) {
 			name: "ready condition unknown",
 			nodeStatus: &corev1.NodeStatus{
 				Conditions: []corev1.NodeCondition{
-					corev1.NodeCondition{
+					{
 						Type:   corev1.NodeReady,
 						Status: corev1.ConditionUnknown,
 					},
@@ -253,7 +253,7 @@ func TestIsNodeReady(t *testing.T) {
 			name: "no ready condition",
 			node: &corev1.Node{Status: corev1.NodeStatus{
 				Conditions: []corev1.NodeCondition{
-					corev1.NodeCondition{
+					{
 						Type:   corev1.NodeOutOfDisk,
 						Status: corev1.ConditionTrue,
 					},
@@ -265,7 +265,7 @@ func TestIsNodeReady(t *testing.T) {
 			name: "ready condition true",
 			node: &corev1.Node{Status: corev1.NodeStatus{
 				Conditions: []corev1.NodeCondition{
-					corev1.NodeCondition{
+					{
 						Type:   corev1.NodeReady,
 						Status: corev1.ConditionTrue,
 					},
@@ -277,7 +277,7 @@ func TestIsNodeReady(t *testing.T) {
 			name: "ready condition false",
 			node: &corev1.Node{Status: corev1.NodeStatus{
 				Conditions: []corev1.NodeCondition{
-					corev1.NodeCondition{
+					{
 						Type:   corev1.NodeReady,
 						Status: corev1.ConditionFalse,
 					},
@@ -289,7 +289,7 @@ func TestIsNodeReady(t *testing.T) {
 			name: "ready condition unknown",
 			node: &corev1.Node{Status: corev1.NodeStatus{
 				Conditions: []corev1.NodeCondition{
-					corev1.NodeCondition{
+					{
 						Type:   corev1.NodeReady,
 						Status: corev1.ConditionUnknown,
 					},
diff --git a/pkg/errors/deployer.go b/pkg/errors/deployer.go
index 408871ecca84..45fb789ada15 100644
--- a/pkg/errors/deployer.go
+++ b/pkg/errors/deployer.go
@@ -18,4 +18,4 @@ package errors
 
 import "github.com/pkg/errors"
 
-var NotImplementedError = errors.New("not implemented")
+var ErrNotImplemented = errors.New("not implemented")
diff --git a/pkg/kubeadm/kubeadm.go b/pkg/kubeadm/kubeadm.go
index e6cb3dc84b2b..c1748a8a93c1 100644
--- a/pkg/kubeadm/kubeadm.go
+++ b/pkg/kubeadm/kubeadm.go
@@ -20,7 +20,7 @@ import (
 	"strings"
 	"time"
 
-	"sigs.k8s.io/cluster-api/pkg/cmd-runner"
+	cmd_runner "sigs.k8s.io/cluster-api/pkg/cmd-runner"
 )
 
 // The purpose of Kubeadm and this file is to provide a unit tested wrapper around the 'kubeadm' exec command. Higher
@@ -37,7 +37,7 @@ type TokenCreateParams struct {
 	Help             bool
 	KubeConfig       string
 	PrintJoinCommand bool
-	Ttl              time.Duration
+	TTL              time.Duration
 	Usages           []string
 }
 
@@ -61,9 +61,9 @@ func (k *Kubeadm) TokenCreate(params TokenCreateParams) (string, error) {
 	args = appendFlagIfTrue(args, "--help", params.Help)
 	args = appendStringParamIfPresent(args, "--kubeconfig", params.KubeConfig)
 	args = appendFlagIfTrue(args, "--print-join-command", params.PrintJoinCommand)
-	if params.Ttl != time.Duration(0) {
+	if params.TTL != time.Duration(0) {
 		args = append(args, "--ttl")
-		args = append(args, params.Ttl.String())
+		args = append(args, params.TTL.String())
 	}
 	args = appendStringSliceIfValid(args, "--usages", params.Usages)
 	return k.runner.CombinedOutput("kubeadm", args...)
diff --git a/pkg/kubeadm/kubeadm_test.go b/pkg/kubeadm/kubeadm_test.go
index 6b50130e479d..9e385456dfb9 100644
--- a/pkg/kubeadm/kubeadm_test.go
+++ b/pkg/kubeadm/kubeadm_test.go
@@ -24,7 +24,7 @@ import (
 	"time"
 
 	"sigs.k8s.io/cluster-api/pkg/kubeadm"
-	"sigs.k8s.io/cluster-api/pkg/test-cmd-runner"
+	test_cmd_runner "sigs.k8s.io/cluster-api/pkg/test-cmd-runner"
 )
 
 var (
@@ -52,14 +52,14 @@ func TestTokenCreateParameters(t *testing.T) {
 		{"groups", nil, "kubeadm token create --groups system:bootstrappers:kubeadm:default-node-token", kubeadm.TokenCreateParams{Groups: []string{"system", "bootstrappers", "kubeadm", "default-node-token"}}},
 		{"help", nil, "kubeadm token create --help", kubeadm.TokenCreateParams{Help: true}},
 		{"print join command", nil, "kubeadm token create --print-join-command", kubeadm.TokenCreateParams{PrintJoinCommand: true}},
-		{"ttl 24 hours", nil, "kubeadm token create --ttl 24h0m0s", kubeadm.TokenCreateParams{Ttl: toDuration(24, 0, 0)}},
-		{"ttl 55 minutes", nil, "kubeadm token create --ttl 55m0s", kubeadm.TokenCreateParams{Ttl: toDuration(0, 55, 0)}},
-		{"ttl 9 seconds", nil, "kubeadm token create --ttl 9s", kubeadm.TokenCreateParams{Ttl: toDuration(0, 0, 9)}},
-		{"ttl 1 second", nil, "kubeadm token create --ttl 1s", kubeadm.TokenCreateParams{Ttl: toDuration(0, 0, 1)}},
-		{"ttl 16 hours, 38 minutes, 2 seconds", nil, "kubeadm token create --ttl 16h38m2s", kubeadm.TokenCreateParams{Ttl: toDuration(16, 38, 2)}},
+		{"ttl 24 hours", nil, "kubeadm token create --ttl 24h0m0s", kubeadm.TokenCreateParams{TTL: toDuration(24, 0, 0)}},
+		{"ttl 55 minutes", nil, "kubeadm token create --ttl 55m0s", kubeadm.TokenCreateParams{TTL: toDuration(0, 55, 0)}},
+		{"ttl 9 seconds", nil, "kubeadm token create --ttl 9s", kubeadm.TokenCreateParams{TTL: toDuration(0, 0, 9)}},
+		{"ttl 1 second", nil, "kubeadm token create --ttl 1s", kubeadm.TokenCreateParams{TTL: toDuration(0, 0, 1)}},
+		{"ttl 16 hours, 38 minutes, 2 seconds", nil, "kubeadm token create --ttl 16h38m2s", kubeadm.TokenCreateParams{TTL: toDuration(16, 38, 2)}},
 		{"usages", nil, "kubeadm token create --usages signing:authentication", kubeadm.TokenCreateParams{Usages: []string{"signing", "authentication"}}},
 		{"all", nil, "kubeadm token create --config /my/config --description my description --groups bootstrappers --help --print-join-command --ttl 1h1m1s --usages authentication",
-			kubeadm.TokenCreateParams{Config: "/my/config", Description: "my description", Groups: []string{"bootstrappers"}, Help: true, PrintJoinCommand: true, Ttl: toDuration(1, 1, 1), Usages: []string{"authentication"}}},
+			kubeadm.TokenCreateParams{Config: "/my/config", Description: "my description", Groups: []string{"bootstrappers"}, Help: true, PrintJoinCommand: true, TTL: toDuration(1, 1, 1), Usages: []string{"authentication"}}},
 	}
 	kadm := kubeadm.NewWithCmdRunner(test_cmd_runner.NewTestRunnerFailOnErr(t, echoCallback))
 	for _, tst := range tests {
diff --git a/pkg/util/util.go b/pkg/util/util.go
index d27978141766..dc2a1514a63d 100644
--- a/pkg/util/util.go
+++ b/pkg/util/util.go
@@ -66,7 +66,7 @@ func GetControlPlaneMachine(machines []*clusterv1.Machine) *clusterv1.Machine {
 
 func MachineP(machines []clusterv1.Machine) []*clusterv1.Machine {
 	// Convert to list of pointers
-	var ret []*clusterv1.Machine
+	ret := make([]*clusterv1.Machine, 0, len(machines))
 	for _, machine := range machines {
 		ret = append(ret, machine.DeepCopy())
 	}
diff --git a/scripts/ci-make.sh b/scripts/ci-make.sh
index 71de2f689b71..7c55a20df6b4 100755
--- a/scripts/ci-make.sh
+++ b/scripts/ci-make.sh
@@ -20,4 +20,4 @@ set -o pipefail
 
 REPO_ROOT=$(dirname "${BASH_SOURCE}")/..
 
-cd $REPO_ROOT && make docker-build
+cd $REPO_ROOT && make lint-full docker-build
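With the targets above in place, the linters can be run locally the same way CI now runs them via scripts/ci-make.sh. A minimal usage sketch (assumes bazel and dep are installed, since both Make targets run dep-ensure first; BAZEL_ARGS is optional and passed through to bazel):

    make lint       # fast linters only, wraps `bazel run //:lint`
    make lint-full  # full pass (--fast=false), wraps `bazel run //:lint-full`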