diff --git a/.golangci.yml b/.golangci.yml index 4b161837c939..db1cb29f70c4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -23,11 +23,10 @@ linters: - nolintlint - prealloc - rowserrcheck - - scopelint + - exportloopref - staticcheck - structcheck - stylecheck - - testpackage - typecheck - unconvert - unparam @@ -43,20 +42,44 @@ issues: exclude-use-default: false # List of regexps of issue texts to exclude, empty list by default. exclude: - - Using the variable on range scope `(tc)|(rt)|(tt)|(test)|(testcase)|(testCase)` in function literal - - "G108: Profiling endpoint is automatically exposed on /debug/pprof" - - Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked - # The following are being worked on to remove their exclusion. This list should be reduced or go away all together over time. - # If it is decided they will not be addressed they should be moved above this comment. - - Subprocess launch(ed with variable|ing should be audited) - - (Expect directory permissions to be 0750 or less|Expect file permissions to be 0600 or less) - - (G104|G307) + - "G108: Profiling endpoint is automatically exposed on /debug/pprof" + - Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked + - exported method `.*.Reconcile` should have comment or be unexported + - exported method `.*.SetupWithManager` should have comment or be unexported + # The following are being worked on to remove their exclusion. This list should be reduced or go away all together over time. + # If it is decided they will not be addressed they should be moved above this comment. 
+ - Subprocess launch(ed with variable|ing should be audited) + - (Expect directory permissions to be 0750 or less|Expect file permissions to be 0600 or less) + - (G104|G307) + - exported (method|function|type|const) (.+) should have comment or be unexported + exclude-rules: + # With Go 1.16, the new embed directive can be used with an un-named import, + # golint only allows these to be imported in a main.go, which wouldn't work for us. + # This directive allows the embed package to be imported with an underscore everywhere. + - linters: + - golint + source: _ "embed" + # Disable unparam "always receives" which might not be really + # useful when building libraries. + - linters: + - unparam + text: always receives + # Dot imports for gomega or ginkgo are allowed + # within test files. + - path: _test\.go + text: should not use dot imports + - path: _test\.go + text: cyclomatic complexity + - path: test/framework.*.go + text: should not use dot imports + - path: test/e2e.*.go + text: should not use dot imports run: timeout: 10m skip-files: - - "zz_generated.*\\.go$" - - ".*conversion.*\\.go$" + - "zz_generated.*\\.go$" + - ".*conversion.*\\.go$" skip-dirs: - - third_party + - third_party allow-parallel-runners: true diff --git a/Makefile b/Makefile index 891edec7747f..34d9624d8363 100644 --- a/Makefile +++ b/Makefile @@ -215,16 +215,7 @@ e2e-framework: ## Builds the CAPI e2e framework .PHONY: lint lint: $(GOLANGCI_LINT) ## Lint codebase - $(MAKE) -j8 lint-all - -.PHONY: lint-all lint-core lint-e2e lint-capd -lint-all: lint-core lint-e2e lint-capd -lint-core: $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS) -lint-e2e: - cd $(E2E_FRAMEWORK_DIR); $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS) -lint-capd: - cd $(CAPD_DIR); $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS) .PHONY: lint-fix lint-fix: $(GOLANGCI_LINT) ## Lint the codebase and run auto-fixers if supported by the linter. 
diff --git a/api/v1alpha3/cluster_types.go b/api/v1alpha3/cluster_types.go index 1491f5151a42..574d81dd6cf1 100644 --- a/api/v1alpha3/cluster_types.go +++ b/api/v1alpha3/cluster_types.go @@ -29,6 +29,8 @@ import ( ) const ( + // ClusterFinalizer is the finalizer used by the cluster controller to + // cleanup the cluster resources when a Cluster is being deleted. ClusterFinalizer = "cluster.cluster.x-k8s.io" ) @@ -87,6 +89,7 @@ type ClusterNetwork struct { // ANCHOR_END: ClusterNetwork // ANCHOR: NetworkRanges + // NetworkRanges represents ranges of network addresses. type NetworkRanges struct { CIDRBlocks []string `json:"cidrBlocks"` diff --git a/api/v1alpha3/common_types.go b/api/v1alpha3/common_types.go index 3534215398b1..5f9c0f581841 100644 --- a/api/v1alpha3/common_types.go +++ b/api/v1alpha3/common_types.go @@ -53,13 +53,16 @@ const ( // MachineAddressType describes a valid MachineAddress type. type MachineAddressType string +// Define all the constants related to MachineAddressType. const ( MachineHostName MachineAddressType = "Hostname" MachineExternalIP MachineAddressType = "ExternalIP" MachineInternalIP MachineAddressType = "InternalIP" MachineExternalDNS MachineAddressType = "ExternalDNS" MachineInternalDNS MachineAddressType = "InternalDNS" +) +const ( // MachineNodeNameIndex is used by the Machine Controller to index Machines by Node name, and add a watch on Nodes. MachineNodeNameIndex = "status.nodeRef.name" ) diff --git a/api/v1alpha3/condition_consts.go b/api/v1alpha3/condition_consts.go index a18955d04f5c..65393255f430 100644 --- a/api/v1alpha3/condition_consts.go +++ b/api/v1alpha3/condition_consts.go @@ -55,7 +55,7 @@ const ( // Conditions and condition Reasons for the Cluster object const ( - // ControlPlaneReady reports the ready condition from the control plane object defined for this cluster. + // ControlPlaneReadyCondition reports the ready condition from the control plane object defined for this cluster. 
// This condition is mirrored from the Ready condition in the control plane ref object, and // the absence of this condition might signal problems in the reconcile external loops or the fact that // the control plane provider does not not implements the Ready condition yet. @@ -173,7 +173,7 @@ const ( // allowed to remediate any Machines or whether it is blocked from remediating any further. RemediationAllowedCondition ConditionType = "RemediationAllowed" - // TooManyUnhealthy is the reason used when too many Machines are unhealthy and the MachineHealthCheck is blocked + // TooManyUnhealthyReason is the reason used when too many Machines are unhealthy and the MachineHealthCheck is blocked // from making any further remediations. TooManyUnhealthyReason = "TooManyUnhealthy" ) diff --git a/api/v1alpha3/machinedeployment_types.go b/api/v1alpha3/machinedeployment_types.go index c4f1a22d7125..717b56b453c3 100644 --- a/api/v1alpha3/machinedeployment_types.go +++ b/api/v1alpha3/machinedeployment_types.go @@ -24,7 +24,7 @@ import ( type MachineDeploymentStrategyType string const ( - // Replace the old MachineSet by new one using rolling update + // RollingUpdateMachineDeploymentStrategyType replaces the old MachineSet by new one using rolling update // i.e. gradually scale down the old MachineSet and scale up the new one. RollingUpdateMachineDeploymentStrategyType MachineDeploymentStrategyType = "RollingUpdate" diff --git a/api/v1alpha4/cluster_types.go b/api/v1alpha4/cluster_types.go index 93ccadc7322f..7819ed119b6d 100644 --- a/api/v1alpha4/cluster_types.go +++ b/api/v1alpha4/cluster_types.go @@ -30,6 +30,8 @@ import ( ) const ( + // ClusterFinalizer is the finalizer used by the cluster controller to + // cleanup the cluster resources when a Cluster is being deleted. ClusterFinalizer = "cluster.cluster.x-k8s.io" ) @@ -88,6 +90,7 @@ type ClusterNetwork struct { // ANCHOR_END: ClusterNetwork // ANCHOR: NetworkRanges + // NetworkRanges represents ranges of network addresses. 
type NetworkRanges struct { CIDRBlocks []string `json:"cidrBlocks"` @@ -286,6 +289,7 @@ func ipFamilyForCIDRStrings(cidrs []string) (ClusterIPFamily, error) { type ClusterIPFamily int +// Define the ClusterIPFamily constants. const ( InvalidIPFamily ClusterIPFamily = iota IPv4IPFamily diff --git a/api/v1alpha4/common_types.go b/api/v1alpha4/common_types.go index 2b57dca76ce6..324bca2e03aa 100644 --- a/api/v1alpha4/common_types.go +++ b/api/v1alpha4/common_types.go @@ -102,13 +102,16 @@ var ( // MachineAddressType describes a valid MachineAddress type. type MachineAddressType string +// Define the MachineAddressType constants. const ( MachineHostName MachineAddressType = "Hostname" MachineExternalIP MachineAddressType = "ExternalIP" MachineInternalIP MachineAddressType = "InternalIP" MachineExternalDNS MachineAddressType = "ExternalDNS" MachineInternalDNS MachineAddressType = "InternalDNS" +) +const ( // MachineNodeNameIndex is used by the Machine Controller to index Machines by Node name, and add a watch on Nodes. MachineNodeNameIndex = "status.nodeRef.name" ) diff --git a/api/v1alpha4/condition_consts.go b/api/v1alpha4/condition_consts.go index df36e5a0d4d5..55e610cb280d 100644 --- a/api/v1alpha4/condition_consts.go +++ b/api/v1alpha4/condition_consts.go @@ -72,7 +72,7 @@ const ( // provider to report successful control plane initialization. WaitingForControlPlaneProviderInitializedReason = "WaitingForControlPlaneProviderInitialized" - // ControlPlaneReady reports the ready condition from the control plane object defined for this cluster. + // ControlPlaneReadyCondition reports the ready condition from the control plane object defined for this cluster. // This condition is mirrored from the Ready condition in the control plane ref object, and // the absence of this condition might signal problems in the reconcile external loops or the fact that // the control plane provider does not not implements the Ready condition yet. 
@@ -197,7 +197,7 @@ const ( // allowed to remediate any Machines or whether it is blocked from remediating any further. RemediationAllowedCondition ConditionType = "RemediationAllowed" - // TooManyUnhealthy is the reason used when too many Machines are unhealthy and the MachineHealthCheck is blocked + // TooManyUnhealthyReason is the reason used when too many Machines are unhealthy and the MachineHealthCheck is blocked // from making any further remediations. TooManyUnhealthyReason = "TooManyUnhealthy" ) diff --git a/api/v1alpha4/machine_webhook.go b/api/v1alpha4/machine_webhook.go index af269e9f1c69..7de314bd879f 100644 --- a/api/v1alpha4/machine_webhook.go +++ b/api/v1alpha4/machine_webhook.go @@ -18,9 +18,10 @@ package v1alpha4 import ( "fmt" - "sigs.k8s.io/cluster-api/util/version" "strings" + "sigs.k8s.io/cluster-api/util/version" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" diff --git a/api/v1alpha4/machinedeployment_types.go b/api/v1alpha4/machinedeployment_types.go index 1cb6cf56a0a8..3d458c1a48a6 100644 --- a/api/v1alpha4/machinedeployment_types.go +++ b/api/v1alpha4/machinedeployment_types.go @@ -24,7 +24,7 @@ import ( type MachineDeploymentStrategyType string const ( - // Replace the old MachineSet by new one using rolling update + // RollingUpdateMachineDeploymentStrategyType replaces the old MachineSet by new one using rolling update // i.e. gradually scale down the old MachineSet and scale up the new one. RollingUpdateMachineDeploymentStrategyType MachineDeploymentStrategyType = "RollingUpdate" @@ -33,12 +33,15 @@ const ( // RevisionAnnotation is the revision annotation of a machine deployment's machine sets which records its rollout sequence. RevisionAnnotation = "machinedeployment.clusters.x-k8s.io/revision" + // RevisionHistoryAnnotation maintains the history of all old revisions that a machine set has served for a machine deployment. 
RevisionHistoryAnnotation = "machinedeployment.clusters.x-k8s.io/revision-history" + // DesiredReplicasAnnotation is the desired replicas for a machine deployment recorded as an annotation // in its machine sets. Helps in separating scaling events from the rollout process and for // determining if the new machine set for a deployment is really saturated. DesiredReplicasAnnotation = "machinedeployment.clusters.x-k8s.io/desired-replicas" + // MaxReplicasAnnotation is the maximum replicas a deployment can have at a given point, which // is machinedeployment.spec.replicas + maxSurge. Used by the underlying machine sets to estimate their // proportions in case the deployment has surge replicas. diff --git a/api/v1alpha4/machinedeployment_webhook.go b/api/v1alpha4/machinedeployment_webhook.go index 14f698e11134..4e7cf56ede82 100644 --- a/api/v1alpha4/machinedeployment_webhook.go +++ b/api/v1alpha4/machinedeployment_webhook.go @@ -24,7 +24,6 @@ import ( "k8s.io/apimachinery/pkg/labels" runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" - intstrutil "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/utils/pointer" ctrl "sigs.k8s.io/controller-runtime" @@ -100,7 +99,7 @@ func (m *MachineDeployment) validate(old *MachineDeployment) error { } if m.Spec.Strategy.RollingUpdate.MaxSurge != nil { - if _, err := intstrutil.GetScaledValueFromIntOrPercent(m.Spec.Strategy.RollingUpdate.MaxSurge, total, true); err != nil { + if _, err := intstr.GetScaledValueFromIntOrPercent(m.Spec.Strategy.RollingUpdate.MaxSurge, total, true); err != nil { allErrs = append( allErrs, field.Invalid(field.NewPath("spec", "strategy", "rollingUpdate", "maxSurge"), @@ -110,7 +109,7 @@ func (m *MachineDeployment) validate(old *MachineDeployment) error { } if m.Spec.Strategy.RollingUpdate.MaxUnavailable != nil { - if _, err := intstrutil.GetScaledValueFromIntOrPercent(m.Spec.Strategy.RollingUpdate.MaxUnavailable, total, true); err != nil 
{ + if _, err := intstr.GetScaledValueFromIntOrPercent(m.Spec.Strategy.RollingUpdate.MaxUnavailable, total, true); err != nil { allErrs = append( allErrs, field.Invalid(field.NewPath("spec", "strategy", "rollingUpdate", "maxUnavailable"), diff --git a/api/v1alpha4/machineset_webhook.go b/api/v1alpha4/machineset_webhook.go index d26adcf647dc..568f8cbeb93e 100644 --- a/api/v1alpha4/machineset_webhook.go +++ b/api/v1alpha4/machineset_webhook.go @@ -40,7 +40,7 @@ func (m *MachineSet) SetupWebhookWithManager(mgr ctrl.Manager) error { var _ webhook.Defaulter = &MachineSet{} var _ webhook.Validator = &MachineSet{} -// DefaultingFunction sets default MachineSet field values. +// Default sets default MachineSet field values. func (m *MachineSet) Default() { if m.Labels == nil { m.Labels = make(map[string]string) diff --git a/bootstrap/kubeadm/api/v1alpha3/kubeadmconfig_types.go b/bootstrap/kubeadm/api/v1alpha3/kubeadmconfig_types.go index 79877c3de9bd..a38d5b4d4e88 100644 --- a/bootstrap/kubeadm/api/v1alpha3/kubeadmconfig_types.go +++ b/bootstrap/kubeadm/api/v1alpha3/kubeadmconfig_types.go @@ -211,7 +211,7 @@ type FileSource struct { Secret SecretFileSource `json:"secret"` } -// Adapts a Secret into a FileSource. +// SecretFileSource adapts a Secret into a FileSource. // // The contents of the target Secret's Data field will be presented // as files using the keys in the Data field as the file names. 
diff --git a/bootstrap/kubeadm/api/v1alpha4/kubeadm_types.go b/bootstrap/kubeadm/api/v1alpha4/kubeadm_types.go index 16957a993eaf..a4fd713ed0cb 100644 --- a/bootstrap/kubeadm/api/v1alpha4/kubeadm_types.go +++ b/bootstrap/kubeadm/api/v1alpha4/kubeadm_types.go @@ -412,11 +412,12 @@ type HostPathMount struct { PathType corev1.HostPathType `json:"pathType,omitempty"` } -// +kubebuilder:validation:Type=string // BootstrapTokenString is a token of the format abcdef.abcdef0123456789 that is used // for both validation of the practically of the API server from a joining node's point // of view and as an authentication method for the node in the bootstrap phase of // "kubeadm join". This token is and should be short-lived. +// +// +kubebuilder:validation:Type=string type BootstrapTokenString struct { ID string `json:"-"` Secret string `json:"-"` diff --git a/bootstrap/kubeadm/api/v1alpha4/kubeadmconfig_types.go b/bootstrap/kubeadm/api/v1alpha4/kubeadmconfig_types.go index a941145cb607..16aea686b70c 100644 --- a/bootstrap/kubeadm/api/v1alpha4/kubeadmconfig_types.go +++ b/bootstrap/kubeadm/api/v1alpha4/kubeadmconfig_types.go @@ -204,7 +204,7 @@ type FileSource struct { Secret SecretFileSource `json:"secret"` } -// Adapts a Secret into a FileSource. +// SecretFileSource adapts a Secret into a FileSource. // // The contents of the target Secret's Data field will be presented // as files using the keys in the Data field as the file names. 
diff --git a/bootstrap/kubeadm/api/v1alpha4/kubeadmconfig_webhook.go b/bootstrap/kubeadm/api/v1alpha4/kubeadmconfig_webhook.go index b09c1c43464a..77f5c9faa866 100644 --- a/bootstrap/kubeadm/api/v1alpha4/kubeadmconfig_webhook.go +++ b/bootstrap/kubeadm/api/v1alpha4/kubeadmconfig_webhook.go @@ -27,10 +27,10 @@ import ( ) var ( - ConflictingFileSourceMsg = "only one of content or contentFrom may be specified for a single file" - MissingSecretNameMsg = "secret file source must specify non-empty secret name" - MissingSecretKeyMsg = "secret file source must specify non-empty secret key" - PathConflictMsg = "path property must be unique among all files" + conflictingFileSourceMsg = "only one of content or contentFrom may be specified for a single file" + missingSecretNameMsg = "secret file source must specify non-empty secret name" + missingSecretKeyMsg = "secret file source must specify non-empty secret key" + pathConflictMsg = "path property must be unique among all files" ) func (c *KubeadmConfig) SetupWebhookWithManager(mgr ctrl.Manager) error { @@ -71,7 +71,7 @@ func (c *KubeadmConfigSpec) validate(name string) error { field.Invalid( field.NewPath("spec", "files", fmt.Sprintf("%d", i)), file, - ConflictingFileSourceMsg, + conflictingFileSourceMsg, ), ) } @@ -85,7 +85,7 @@ func (c *KubeadmConfigSpec) validate(name string) error { field.Invalid( field.NewPath("spec", "files", fmt.Sprintf("%d", i), "contentFrom", "secret", "name"), file, - MissingSecretNameMsg, + missingSecretNameMsg, ), ) } @@ -95,7 +95,7 @@ func (c *KubeadmConfigSpec) validate(name string) error { field.Invalid( field.NewPath("spec", "files", fmt.Sprintf("%d", i), "contentFrom", "secret", "key"), file, - MissingSecretKeyMsg, + missingSecretKeyMsg, ), ) } @@ -107,7 +107,7 @@ func (c *KubeadmConfigSpec) validate(name string) error { field.Invalid( field.NewPath("spec", "files", fmt.Sprintf("%d", i), "path"), file, - PathConflictMsg, + pathConflictMsg, ), ) } diff --git 
a/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go index a297a8ea8e1a..28bb556fca81 100644 --- a/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go +++ b/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go @@ -190,7 +190,7 @@ func TestKubeadmConfigReconciler_ReturnEarlyIfClusterInfraNotReady(t *testing.T) machine := newMachine(cluster, "machine") config := newKubeadmConfig(machine, "cfg") - //cluster infra not ready + // cluster infra not ready cluster.Status = clusterv1.ClusterStatus{ InfrastructureReady: false, } diff --git a/bootstrap/kubeadm/internal/cloudinit/cloudinit_test.go b/bootstrap/kubeadm/internal/cloudinit/cloudinit_test.go index 2c52415b81cb..4538c4a52d48 100644 --- a/bootstrap/kubeadm/internal/cloudinit/cloudinit_test.go +++ b/bootstrap/kubeadm/internal/cloudinit/cloudinit_test.go @@ -23,7 +23,6 @@ import ( "k8s.io/utils/pointer" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" - infrav1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" "sigs.k8s.io/cluster-api/util/certs" "sigs.k8s.io/cluster-api/util/secret" ) @@ -36,10 +35,10 @@ func TestNewInitControlPlaneAdditionalFileEncodings(t *testing.T) { Header: "test", PreKubeadmCommands: nil, PostKubeadmCommands: nil, - AdditionalFiles: []infrav1.File{ + AdditionalFiles: []bootstrapv1.File{ { Path: "/tmp/my-path", - Encoding: infrav1.Base64, + Encoding: bootstrapv1.Base64, Content: "aGk=", }, { @@ -167,7 +166,7 @@ func TestNewInitControlPlaneDiskMounts(t *testing.T) { - label: test_disk filesystem: ext4 device: test-device - extra_opts: + extra_opts: - -F - -E - lazy_itable_init=1,lazy_journal_init=1` @@ -175,7 +174,7 @@ func TestNewInitControlPlaneDiskMounts(t *testing.T) { - - test_disk - /var/lib/testdir` - g.Expect(out).To(ContainSubstring(expectedDiskSetup)) - g.Expect(out).To(ContainSubstring(expectedFSSetup)) - g.Expect(out).To(ContainSubstring(expectedMounts)) + 
g.Expect(string(out)).To(ContainSubstring(expectedDiskSetup)) + g.Expect(string(out)).To(ContainSubstring(expectedFSSetup)) + g.Expect(string(out)).To(ContainSubstring(expectedMounts)) } diff --git a/bootstrap/kubeadm/internal/cloudinit/fs_setup.go b/bootstrap/kubeadm/internal/cloudinit/fs_setup.go index ecfb2291f71c..157ce6eac12d 100644 --- a/bootstrap/kubeadm/internal/cloudinit/fs_setup.go +++ b/bootstrap/kubeadm/internal/cloudinit/fs_setup.go @@ -33,7 +33,7 @@ fs_setup:{{ range .Filesystems }} replace_fs: {{ .ReplaceFS }} {{- end }} {{- if .ExtraOpts }} - extra_opts: {{ range .ExtraOpts }} + extra_opts: {{- range .ExtraOpts }} - {{ . }} {{- end -}} {{- end -}} diff --git a/bootstrap/kubeadm/types/v1beta1/bootstraptokenstring.go b/bootstrap/kubeadm/types/v1beta1/bootstraptokenstring.go index 09c43c1e9e28..ab57e71b7576 100644 --- a/bootstrap/kubeadm/types/v1beta1/bootstraptokenstring.go +++ b/bootstrap/kubeadm/types/v1beta1/bootstraptokenstring.go @@ -26,11 +26,12 @@ import ( bootstraputil "k8s.io/cluster-bootstrap/token/util" ) -// +kubebuilder:validation:Type=string // BootstrapTokenString is a token of the format abcdef.abcdef0123456789 that is used // for both validation of the practically of the API server from a joining node's point // of view and as an authentication method for the node in the bootstrap phase of // "kubeadm join". This token is and should be short-lived. 
+// +// +kubebuilder:validation:Type=string type BootstrapTokenString struct { ID string `json:"-"` Secret string `json:"-"` diff --git a/bootstrap/kubeadm/types/v1beta2/bootstraptokenstring.go b/bootstrap/kubeadm/types/v1beta2/bootstraptokenstring.go index aa1f51ab6db9..7efbf59c241e 100644 --- a/bootstrap/kubeadm/types/v1beta2/bootstraptokenstring.go +++ b/bootstrap/kubeadm/types/v1beta2/bootstraptokenstring.go @@ -26,11 +26,12 @@ import ( bootstraputil "k8s.io/cluster-bootstrap/token/util" ) -// +kubebuilder:validation:Type=string // BootstrapTokenString is a token of the format abcdef.abcdef0123456789 that is used // for both validation of the practically of the API server from a joining node's point // of view and as an authentication method for the node in the bootstrap phase of // "kubeadm join". This token is and should be short-lived. +// +// +kubebuilder:validation:Type=string type BootstrapTokenString struct { ID string `json:"-"` Secret string `json:"-"` diff --git a/bootstrap/util/configowner.go b/bootstrap/util/configowner.go index b38f39286bfe..ec524cdaf86f 100644 --- a/bootstrap/util/configowner.go +++ b/bootstrap/util/configowner.go @@ -82,7 +82,7 @@ func (co ConfigOwner) IsMachinePool() bool { return co.GetKind() == "MachinePool" } -// Returns the Kuberentes version for the config owner object. +// KubernetesVersion returns the Kubernetes version for the config owner object.
func (co ConfigOwner) KubernetesVersion() string { fields := []string{"spec", "version"} if co.IsMachinePool() { diff --git a/cmd/clusterctl/client/cluster/cert_manager.go b/cmd/clusterctl/client/cluster/cert_manager.go index d5fe4da4f3ca..16a0d166aca1 100644 --- a/cmd/clusterctl/client/cluster/cert_manager.go +++ b/cmd/clusterctl/client/cluster/cert_manager.go @@ -54,6 +54,8 @@ var ( certManagerManifest []byte //go:embed assets/cert-manager-test-resources.yaml certManagerTestManifest []byte + + certManagerRegexp = regexp.MustCompile("(?:quay.io/jetstack/cert-manager-controller:)(.*)") ) // CertManagerUpgradePlan defines the upgrade plan if cert-manager needs to be @@ -93,18 +95,12 @@ type certManagerClient struct { // Ensure certManagerClient implements the CertManagerClient interface. var _ CertManagerClient = &certManagerClient{} -func (cm *certManagerClient) setManifestHash() error { +func (cm *certManagerClient) setManifestHash() { cm.embeddedCertManagerManifestHash = fmt.Sprintf("%x", sha256.Sum256(certManagerManifest)) - return nil } func (cm *certManagerClient) setManifestVersion() error { - r, err := regexp.Compile("(?:quay.io/jetstack/cert-manager-controller:)(.*)") - if err != nil { - return err - } - - if match := r.FindStringSubmatch(string(certManagerManifest)); len(match) > 0 { + if match := certManagerRegexp.FindStringSubmatch(string(certManagerManifest)); len(match) > 0 { cm.embeddedCertManagerManifestVersion = match[1] return nil } @@ -123,10 +119,7 @@ func newCertManagerClient(configClient config.Client, proxy Proxy, pollImmediate return nil, err } - err = cm.setManifestHash() - if err != nil { - return nil, err - } + cm.setManifestHash() return cm, nil } @@ -458,7 +451,7 @@ func (cm *certManagerClient) deleteObj(obj unstructured.Unstructured) error { // cert-manager API group. // If retry is true, the createObj call will be retried if it fails. Otherwise, the // 'create' operations will only be attempted once. 
-func (cm *certManagerClient) waitForAPIReady(ctx context.Context, retry bool) error { +func (cm *certManagerClient) waitForAPIReady(_ context.Context, retry bool) error { log := logf.Log // Waits for for the cert-manager to be available. if retry { diff --git a/cmd/clusterctl/client/cluster/components.go b/cmd/clusterctl/client/cluster/components.go index 445dde3ad078..5290b159f675 100644 --- a/cmd/clusterctl/client/cluster/components.go +++ b/cmd/clusterctl/client/cluster/components.go @@ -99,7 +99,7 @@ func (p *providerComponents) createObj(obj unstructured.Unstructured) error { return errors.Wrapf(err, "failed to get current provider object") } - //if it does not exists, create the component + // if it does not exists, create the component log.V(5).Info("Creating", logf.UnstructuredToValues(obj)...) if err := c.Create(ctx, &obj); err != nil { return errors.Wrapf(err, "failed to create provider object %s, %s/%s", obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName()) diff --git a/cmd/clusterctl/client/cluster/components_test.go b/cmd/clusterctl/client/cluster/components_test.go index bff66bdcb376..c2b7ed85fe2d 100644 --- a/cmd/clusterctl/client/cluster/components_test.go +++ b/cmd/clusterctl/client/cluster/components_test.go @@ -97,7 +97,7 @@ func Test_providerComponents_Delete(t *testing.T) { Name: repository.WebhookNamespaceName, Labels: map[string]string{ clusterctlv1.ClusterctlResourceLifecyleLabelName: string(clusterctlv1.ResourceLifecycleShared), - //NB. the capi-webhook-system namespace doe not have a provider label (see fixSharedLabels) + // NB. 
the capi-webhook-system namespace does not have a provider label (see fixSharedLabels) }, }, }, diff --git a/cmd/clusterctl/client/cluster/installer_test.go b/cmd/clusterctl/client/cluster/installer_test.go index c34c307f587d..f3964ffaf974 100644 --- a/cmd/clusterctl/client/cluster/installer_test.go +++ b/cmd/clusterctl/client/cluster/installer_test.go @@ -107,7 +107,7 @@ func Test_providerInstaller_Validate(t *testing.T) { { name: "install core/current contract + infra1/current contract on an empty cluster", fields: fields{ - proxy: test.NewFakeProxy(), //empty cluster + proxy: test.NewFakeProxy(), // empty cluster installQueue: []repository.Components{ newFakeComponents("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system", ""), newFakeComponents("infra1", clusterctlv1.InfrastructureProviderType, "v1.0.0", "infra1-system", ""), @@ -190,7 +190,7 @@ func Test_providerInstaller_Validate(t *testing.T) { { name: "install core/previous contract + infra1/previous contract on an empty cluster (not supported)", fields: fields{ - proxy: test.NewFakeProxy(), //empty cluster + proxy: test.NewFakeProxy(), // empty cluster installQueue: []repository.Components{ newFakeComponents("cluster-api", clusterctlv1.CoreProviderType, "v0.9.0", "cluster-api-system", ""), newFakeComponents("infra1", clusterctlv1.InfrastructureProviderType, "v0.9.0", "infra1-system", ""), @@ -201,7 +201,7 @@ func Test_providerInstaller_Validate(t *testing.T) { { name: "install core/previous contract + infra1/current contract on an empty cluster (not supported)", fields: fields{ - proxy: test.NewFakeProxy(), //empty cluster + proxy: test.NewFakeProxy(), // empty cluster installQueue: []repository.Components{ newFakeComponents("cluster-api", clusterctlv1.CoreProviderType, "v0.9.0", "cluster-api-system", ""), newFakeComponents("infra1", clusterctlv1.InfrastructureProviderType, "v1.0.0", "infra1-system", ""), @@ -223,7 +223,7 @@ { name:
"install core/next contract + infra1/next contract on an empty cluster (not supported)", fields: fields{ - proxy: test.NewFakeProxy(), //empty cluster + proxy: test.NewFakeProxy(), // empty cluster installQueue: []repository.Components{ newFakeComponents("cluster-api", clusterctlv1.CoreProviderType, "v2.0.0", "cluster-api-system", ""), newFakeComponents("infra1", clusterctlv1.InfrastructureProviderType, "v2.0.0", "infra1-system", ""), @@ -234,7 +234,7 @@ func Test_providerInstaller_Validate(t *testing.T) { { name: "install core/current contract + infra1/next contract on an empty cluster (not supported)", fields: fields{ - proxy: test.NewFakeProxy(), //empty cluster + proxy: test.NewFakeProxy(), // empty cluster installQueue: []repository.Components{ newFakeComponents("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system", ""), newFakeComponents("infra1", clusterctlv1.InfrastructureProviderType, "v2.0.0", "infra1-system", ""), diff --git a/cmd/clusterctl/client/cluster/inventory.go b/cmd/clusterctl/client/cluster/inventory.go index 5af32d8de4d7..e20abf747118 100644 --- a/cmd/clusterctl/client/cluster/inventory.go +++ b/cmd/clusterctl/client/cluster/inventory.go @@ -278,7 +278,7 @@ func (p *inventoryClient) Create(m clusterctlv1.Provider) error { return errors.Wrapf(err, "failed to get current provider object") } - //if it does not exists, create the provider object + // if it does not exists, create the provider object if err := cl.Create(ctx, &m); err != nil { return errors.Wrapf(err, "failed to create provider object") } diff --git a/cmd/clusterctl/client/cluster/inventory_managementgroup_test.go b/cmd/clusterctl/client/cluster/inventory_managementgroup_test.go index 01b4cb441865..5de1f352162e 100644 --- a/cmd/clusterctl/client/cluster/inventory_managementgroup_test.go +++ b/cmd/clusterctl/client/cluster/inventory_managementgroup_test.go @@ -112,7 +112,7 @@ func Test_inventoryClient_GetManagementGroups(t *testing.T) { }, { name: "fails with 
overlapping core providers", - fields: fields{ //two core providers watching for the same namespaces + fields: fields{ // two core providers watching for the same namespaces proxy: test.NewFakeProxy(). WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system1", ""). WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system2", ""), @@ -122,7 +122,7 @@ func Test_inventoryClient_GetManagementGroups(t *testing.T) { }, { name: "fails with overlapping core providers", - fields: fields{ //a provider watching for objects controlled by more than one core provider + fields: fields{ // a provider watching for objects controlled by more than one core provider proxy: test.NewFakeProxy(). WithProviderInventory("infrastructure", clusterctlv1.InfrastructureProviderType, "v1.0.0", "infra-system", ""). WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system1", "ns1"). @@ -133,7 +133,7 @@ func Test_inventoryClient_GetManagementGroups(t *testing.T) { }, { name: "fails with orphan providers", - fields: fields{ //a provider watching for objects not controlled any core provider + fields: fields{ // a provider watching for objects not controlled any core provider proxy: test.NewFakeProxy(). WithProviderInventory("infrastructure", clusterctlv1.InfrastructureProviderType, "v1.0.0", "infra-system", "ns1"). 
WithProviderInventory("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system1", "ns2"), diff --git a/cmd/clusterctl/client/cluster/inventory_test.go b/cmd/clusterctl/client/cluster/inventory_test.go index 44e5a492d835..08c58e8d9ec9 100644 --- a/cmd/clusterctl/client/cluster/inventory_test.go +++ b/cmd/clusterctl/client/cluster/inventory_test.go @@ -67,7 +67,7 @@ func Test_inventoryClient_CheckInventoryCRDs(t *testing.T) { proxy := test.NewFakeProxy() p := newInventoryClient(proxy, fakePollImmediateWaiter) if tt.fields.alreadyHasCRD { - //forcing creation of metadata before test + // forcing creation of metadata before test g.Expect(p.EnsureCustomResourceDefinitions()).To(Succeed()) } diff --git a/cmd/clusterctl/client/cluster/mover.go b/cmd/clusterctl/client/cluster/mover.go index 8af8533ce86a..96b887e33bc1 100644 --- a/cmd/clusterctl/client/cluster/mover.go +++ b/cmd/clusterctl/client/cluster/mover.go @@ -652,7 +652,7 @@ func (o *objectMover) deleteSourceObject(nodeToDelete *node) error { if err := cFrom.Get(ctx, sourceObjKey, sourceObj); err != nil { if apierrors.IsNotFound(err) { - //If the object is already deleted, move on. + // If the object is already deleted, move on. 
log.V(5).Info("Object already deleted, skipping delete for", nodeToDelete.identity.Kind, nodeToDelete.identity.Name, "Namespace", nodeToDelete.identity.Namespace) return nil } diff --git a/cmd/clusterctl/client/cluster/mover_test.go b/cmd/clusterctl/client/cluster/mover_test.go index 7ed5b4347633..9fd57ab53be1 100644 --- a/cmd/clusterctl/client/cluster/mover_test.go +++ b/cmd/clusterctl/client/cluster/mover_test.go @@ -47,10 +47,10 @@ var moveTests = []struct { objs: test.NewFakeCluster("ns1", "foo").Objs(), }, wantMoveGroups: [][]string{ - { //group 1 + { // group 1 "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/foo", }, - { //group 2 (objects with ownerReferences in group 1) + { // group 2 (objects with ownerReferences in group 1) // owned by Clusters "/v1, Kind=Secret, ns1/foo-ca", "/v1, Kind=Secret, ns1/foo-kubeconfig", @@ -65,12 +65,12 @@ var moveTests = []struct { objs: test.NewFakeCluster("ns1", "foo").WithCloudConfigSecret().Objs(), }, wantMoveGroups: [][]string{ - { //group 1 + { // group 1 "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/foo", // objects with force move flag "/v1, Kind=Secret, ns1/foo-cloud-config", }, - { //group 2 (objects with ownerReferences in group 1) + { // group 2 (objects with ownerReferences in group 1) // owned by Clusters "/v1, Kind=Secret, ns1/foo-ca", "/v1, Kind=Secret, ns1/foo-kubeconfig", @@ -89,10 +89,10 @@ var moveTests = []struct { ).Objs(), }, wantMoveGroups: [][]string{ - { //group 1 + { // group 1 "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", }, - { //group 2 (objects with ownerReferences in group 1) + { // group 2 (objects with ownerReferences in group 1) // owned by Clusters "/v1, Kind=Secret, ns1/cluster1-kubeconfig", "/v1, Kind=Secret, ns1/cluster1-ca", @@ -100,14 +100,14 @@ var moveTests = []struct { "cluster.x-k8s.io/v1alpha4, Kind=Machine, ns1/m2", "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureCluster, ns1/cluster1", }, - { //group 3 (objects with ownerReferences in group 1,2) + { 
// group 3 (objects with ownerReferences in group 1,2) // owned by Machines "bootstrap.cluster.x-k8s.io/v1alpha4, Kind=GenericBootstrapConfig, ns1/m1", "bootstrap.cluster.x-k8s.io/v1alpha4, Kind=GenericBootstrapConfig, ns1/m2", "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureMachine, ns1/m1", "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureMachine, ns1/m2", }, - { //group 4 (objects with ownerReferences in group 1,2,3) + { // group 4 (objects with ownerReferences in group 1,2,3) // owned by GenericBootstrapConfigs "/v1, Kind=Secret, ns1/cluster1-sa", "/v1, Kind=Secret, ns1/m1", @@ -129,10 +129,10 @@ var moveTests = []struct { ).Objs(), }, wantMoveGroups: [][]string{ - { //group 1 + { // group 1 "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", }, - { //group 2 (objects with ownerReferences in group 1) + { // group 2 (objects with ownerReferences in group 1) // owned by Clusters "/v1, Kind=Secret, ns1/cluster1-ca", "/v1, Kind=Secret, ns1/cluster1-kubeconfig", @@ -141,19 +141,19 @@ var moveTests = []struct { "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureCluster, ns1/cluster1", "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureMachineTemplate, ns1/ms1", }, - { //group 3 (objects with ownerReferences in group 1,2) + { // group 3 (objects with ownerReferences in group 1,2) // owned by MachineSets "cluster.x-k8s.io/v1alpha4, Kind=Machine, ns1/m1", "cluster.x-k8s.io/v1alpha4, Kind=Machine, ns1/m2", }, - { //group 4 (objects with ownerReferences in group 1,2,3) + { // group 4 (objects with ownerReferences in group 1,2,3) // owned by Machines "bootstrap.cluster.x-k8s.io/v1alpha4, Kind=GenericBootstrapConfig, ns1/m1", "bootstrap.cluster.x-k8s.io/v1alpha4, Kind=GenericBootstrapConfig, ns1/m2", "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureMachine, ns1/m1", "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureMachine, ns1/m2", }, - { //group 5 
(objects with ownerReferences in group 1,2,3,4) + { // group 5 (objects with ownerReferences in group 1,2,3,4) // owned by GenericBootstrapConfigs "/v1, Kind=Secret, ns1/m1", "/v1, Kind=Secret, ns1/m2", @@ -177,10 +177,10 @@ var moveTests = []struct { ).Objs(), }, wantMoveGroups: [][]string{ - { //group 1 + { // group 1 "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", }, - { //group 2 (objects with ownerReferences in group 1) + { // group 2 (objects with ownerReferences in group 1) // owned by Clusters "/v1, Kind=Secret, ns1/cluster1-ca", "/v1, Kind=Secret, ns1/cluster1-kubeconfig", @@ -189,23 +189,23 @@ var moveTests = []struct { "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureCluster, ns1/cluster1", "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureMachineTemplate, ns1/md1", }, - { //group 3 (objects with ownerReferences in group 1,2) + { // group 3 (objects with ownerReferences in group 1,2) // owned by MachineDeployments "cluster.x-k8s.io/v1alpha4, Kind=MachineSet, ns1/ms1", }, - { //group 4 (objects with ownerReferences in group 1,2,3) + { // group 4 (objects with ownerReferences in group 1,2,3) // owned by MachineSets "cluster.x-k8s.io/v1alpha4, Kind=Machine, ns1/m1", "cluster.x-k8s.io/v1alpha4, Kind=Machine, ns1/m2", }, - { //group 5 (objects with ownerReferences in group 1,2,3,4) + { // group 5 (objects with ownerReferences in group 1,2,3,4) // owned by Machines "bootstrap.cluster.x-k8s.io/v1alpha4, Kind=GenericBootstrapConfig, ns1/m1", "bootstrap.cluster.x-k8s.io/v1alpha4, Kind=GenericBootstrapConfig, ns1/m2", "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureMachine, ns1/m1", "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureMachine, ns1/m2", }, - { //group 6 (objects with ownerReferences in group 1,2,3,5,6) + { // group 6 (objects with ownerReferences in group 1,2,3,5,6) // owned by GenericBootstrapConfigs "/v1, Kind=Secret, ns1/m1", "/v1, Kind=Secret, ns1/m2", @@ 
-226,30 +226,30 @@ var moveTests = []struct { ).Objs(), }, wantMoveGroups: [][]string{ - { //group 1 + { // group 1 "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", }, - { //group 2 (objects with ownerReferences in group 1) + { // group 2 (objects with ownerReferences in group 1) // owned by Clusters "/v1, Kind=Secret, ns1/cluster1-ca", "controlplane.cluster.x-k8s.io/v1alpha4, Kind=GenericControlPlane, ns1/cp1", "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureCluster, ns1/cluster1", "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureMachineTemplate, ns1/cp1", }, - { //group 3 (objects with ownerReferences in group 1,2) + { // group 3 (objects with ownerReferences in group 1,2) "/v1, Kind=Secret, ns1/cluster1-kubeconfig", "/v1, Kind=Secret, ns1/cluster1-sa", "cluster.x-k8s.io/v1alpha4, Kind=Machine, ns1/m1", "cluster.x-k8s.io/v1alpha4, Kind=Machine, ns1/m2", }, - { //group 4 (objects with ownerReferences in group 1,2,3) + { // group 4 (objects with ownerReferences in group 1,2,3) // owned by Machines "bootstrap.cluster.x-k8s.io/v1alpha4, Kind=GenericBootstrapConfig, ns1/m1", "bootstrap.cluster.x-k8s.io/v1alpha4, Kind=GenericBootstrapConfig, ns1/m2", "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureMachine, ns1/m1", "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureMachine, ns1/m2", }, - { //group 5 (objects with ownerReferences in group 1,2,3,4) + { // group 5 (objects with ownerReferences in group 1,2,3,4) // owned by GenericBootstrapConfigs "/v1, Kind=Secret, ns1/m1", "/v1, Kind=Secret, ns1/m2", @@ -266,10 +266,10 @@ var moveTests = []struct { ).Objs(), }, wantMoveGroups: [][]string{ - { //group 1 + { // group 1 "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", }, - { //group 2 (objects with ownerReferences in group 1) + { // group 2 (objects with ownerReferences in group 1) // owned by Clusters "/v1, Kind=Secret, ns1/cluster1-ca", "/v1, Kind=Secret, 
ns1/cluster1-kubeconfig", @@ -292,11 +292,11 @@ var moveTests = []struct { }(), }, wantMoveGroups: [][]string{ - { //group 1 + { // group 1 "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/foo", "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/bar", }, - { //group 2 (objects with ownerReferences in group 1) + { // group 2 (objects with ownerReferences in group 1) // owned by Clusters "/v1, Kind=Secret, ns1/foo-ca", "/v1, Kind=Secret, ns1/foo-kubeconfig", @@ -339,11 +339,11 @@ var moveTests = []struct { }(), }, wantMoveGroups: [][]string{ - { //group 1 + { // group 1 "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster2", }, - { //group 2 (objects with ownerReferences in group 1) + { // group 2 (objects with ownerReferences in group 1) // owned by Clusters "/v1, Kind=Secret, ns1/cluster1-ca", "/v1, Kind=Secret, ns1/cluster1-kubeconfig", @@ -355,21 +355,21 @@ var moveTests = []struct { "bootstrap.cluster.x-k8s.io/v1alpha4, Kind=GenericBootstrapConfigTemplate, ns1/cluster2-ms1", "cluster.x-k8s.io/v1alpha4, Kind=MachineSet, ns1/cluster2-ms1", "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureCluster, ns1/cluster2", - "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureMachineTemplate, ns1/shared", //shared object + "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureMachineTemplate, ns1/shared", // shared object }, - { //group 3 (objects with ownerReferences in group 1,2) + { // group 3 (objects with ownerReferences in group 1,2) // owned by MachineSets "cluster.x-k8s.io/v1alpha4, Kind=Machine, ns1/cluster1-m1", "cluster.x-k8s.io/v1alpha4, Kind=Machine, ns1/cluster2-m1", }, - { //group 4 (objects with ownerReferences in group 1,2,3) + { // group 4 (objects with ownerReferences in group 1,2,3) // owned by Machines "bootstrap.cluster.x-k8s.io/v1alpha4, Kind=GenericBootstrapConfig, ns1/cluster1-m1", "infrastructure.cluster.x-k8s.io/v1alpha4, 
Kind=GenericInfrastructureMachine, ns1/cluster1-m1", "bootstrap.cluster.x-k8s.io/v1alpha4, Kind=GenericBootstrapConfig, ns1/cluster2-m1", "infrastructure.cluster.x-k8s.io/v1alpha4, Kind=GenericInfrastructureMachine, ns1/cluster2-m1", }, - { //group 5 (objects with ownerReferences in group 1,2,3,4) + { // group 5 (objects with ownerReferences in group 1,2,3,4) // owned by GenericBootstrapConfigs "/v1, Kind=Secret, ns1/cluster1-m1", "/v1, Kind=Secret, ns1/cluster2-m1", @@ -393,13 +393,13 @@ var moveTests = []struct { }(), }, wantMoveGroups: [][]string{ - { //group 1 + { // group 1 // Cluster "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", // ClusterResourceSet "addons.cluster.x-k8s.io/v1alpha4, Kind=ClusterResourceSet, ns1/crs1", }, - { //group 2 (objects with ownerReferences in group 1) + { // group 2 (objects with ownerReferences in group 1) // owned by Clusters "/v1, Kind=Secret, ns1/cluster1-ca", "/v1, Kind=Secret, ns1/cluster1-kubeconfig", @@ -429,7 +429,7 @@ var moveTests = []struct { "external.cluster.x-k8s.io/v1alpha4, Kind=GenericExternalObject, ns1/externalTest1", "external.cluster.x-k8s.io/v1alpha4, Kind=GenericExternalObject, /externalTest2", }, - { //group 2 (objects with ownerReferences in group 1) + { // group 2 (objects with ownerReferences in group 1) // owned by Clusters "/v1, Kind=Secret, ns1/foo-ca", "/v1, Kind=Secret, ns1/foo-kubeconfig", diff --git a/cmd/clusterctl/client/cluster/objectgraph.go b/cmd/clusterctl/client/cluster/objectgraph.go index b713687ecbb0..9f6be3163c49 100644 --- a/cmd/clusterctl/client/cluster/objectgraph.go +++ b/cmd/clusterctl/client/cluster/objectgraph.go @@ -63,7 +63,7 @@ type node struct { // virtual records if this node was discovered indirectly, e.g. by processing an OwnerRef, but not yet observed as a concrete object. virtual bool - //newID stores the new UID the objects gets once created in the target cluster. + // newUID stores the new UID the object gets once created in the target cluster. 
newUID types.UID // tenantClusters define the list of Clusters which are tenant for the node, no matter if the node has a direct OwnerReference to the Cluster or if diff --git a/cmd/clusterctl/client/cluster/objectgraph_test.go b/cmd/clusterctl/client/cluster/objectgraph_test.go index 3b300da5a266..e1ad75407829 100644 --- a/cmd/clusterctl/client/cluster/objectgraph_test.go +++ b/cmd/clusterctl/client/cluster/objectgraph_test.go @@ -82,12 +82,6 @@ func TestObjectGraph_getDiscoveryTypeMetaList(t *testing.T) { } } -func sortTypeMetaList(list []metav1.TypeMeta) func(i int, j int) bool { - return func(i, j int) bool { - return list[i].GroupVersionKind().String() < list[j].GroupVersionKind().String() - } -} - type wantGraphItem struct { virtual bool owners []string @@ -345,7 +339,7 @@ var objectGraphsTests = []struct { }, "/v1, Kind=Secret, ns1/cluster1-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", //NB. this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", // NB. this secret is not linked to the cluster through owner ref }, }, "/v1, Kind=Secret, ns1/cluster1-kubeconfig": { @@ -372,7 +366,7 @@ var objectGraphsTests = []struct { }, "/v1, Kind=Secret, ns1/cluster1-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", //NB. this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", // NB. this secret is not linked to the cluster through owner ref }, }, "/v1, Kind=Secret, ns1/cluster1-kubeconfig": { @@ -404,7 +398,7 @@ var objectGraphsTests = []struct { }, "/v1, Kind=Secret, ns1/cluster1-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", //NB. this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", // NB. 
this secret is not linked to the cluster through owner ref }, }, "/v1, Kind=Secret, ns1/cluster1-kubeconfig": { @@ -420,7 +414,7 @@ var objectGraphsTests = []struct { }, "/v1, Kind=Secret, ns1/cluster2-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster2", //NB. this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster2", // NB. this secret is not linked to the cluster through owner ref }, }, "/v1, Kind=Secret, ns1/cluster2-kubeconfig": { @@ -449,7 +443,7 @@ var objectGraphsTests = []struct { }, "/v1, Kind=Secret, ns1/cluster1-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", //NB. this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", // NB. this secret is not linked to the cluster through owner ref }, }, "/v1, Kind=Secret, ns1/cluster1-kubeconfig": { @@ -505,7 +499,7 @@ var objectGraphsTests = []struct { }, "/v1, Kind=Secret, ns1/cluster1-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", //NB. this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", // NB. this secret is not linked to the cluster through owner ref }, }, "/v1, Kind=Secret, ns1/cluster1-kubeconfig": { @@ -577,7 +571,7 @@ var objectGraphsTests = []struct { }, "/v1, Kind=Secret, ns1/cluster1-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", //NB. this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", // NB. this secret is not linked to the cluster through owner ref }, }, "/v1, Kind=Secret, ns1/cluster1-kubeconfig": { @@ -652,7 +646,7 @@ var objectGraphsTests = []struct { }, "/v1, Kind=Secret, ns1/cluster1-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", //NB. 
this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", // NB. this secret is not linked to the cluster through owner ref }, }, @@ -718,7 +712,7 @@ var objectGraphsTests = []struct { }, "/v1, Kind=Secret, ns1/cluster1-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", //NB. this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", // NB. this secret is not linked to the cluster through owner ref }, }, "/v1, Kind=Secret, ns1/cluster1-kubeconfig": { @@ -794,7 +788,7 @@ var objectGraphsTests = []struct { }, "/v1, Kind=Secret, ns1/cluster1-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", //NB. this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", // NB. this secret is not linked to the cluster through owner ref }, }, "/v1, Kind=Secret, ns1/cluster1-kubeconfig": { @@ -842,7 +836,7 @@ var objectGraphsTests = []struct { }, "/v1, Kind=Secret, ns1/cluster2-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster2", //NB. this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster2", // NB. this secret is not linked to the cluster through owner ref }, }, "/v1, Kind=Secret, ns1/cluster2-kubeconfig": { @@ -911,7 +905,7 @@ var objectGraphsTests = []struct { }, "/v1, Kind=Secret, ns1/cluster1-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", //NB. this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", // NB. 
this secret is not linked to the cluster through owner ref }, }, "/v1, Kind=Secret, ns1/cluster1-kubeconfig": { @@ -967,7 +961,7 @@ var objectGraphsTests = []struct { }, "/v1, Kind=Secret, ns1/cluster1-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", //NB. this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", // NB. this secret is not linked to the cluster through owner ref }, }, "/v1, Kind=Secret, ns1/cluster1-kubeconfig": { @@ -983,7 +977,7 @@ var objectGraphsTests = []struct { }, "/v1, Kind=Secret, ns1/cluster2-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster2", //NB. this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster2", // NB. this secret is not linked to the cluster through owner ref }, }, "/v1, Kind=Secret, ns1/cluster2-kubeconfig": { @@ -1041,7 +1035,7 @@ var objectGraphsTests = []struct { }, "/v1, Kind=Secret, ns1/cluster1-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", //NB. this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", // NB. this secret is not linked to the cluster through owner ref }, }, "/v1, Kind=Secret, ns1/cluster1-kubeconfig": { @@ -1172,7 +1166,7 @@ func TestObjectGraph_DiscoveryByNamespace(t *testing.T) { }, "/v1, Kind=Secret, ns1/cluster1-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", //NB. this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", // NB. 
this secret is not linked to the cluster through owner ref }, }, "/v1, Kind=Secret, ns1/cluster1-kubeconfig": { @@ -1188,7 +1182,7 @@ func TestObjectGraph_DiscoveryByNamespace(t *testing.T) { }, "/v1, Kind=Secret, ns2/cluster1-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns2/cluster1", //NB. this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns2/cluster1", // NB. this secret is not linked to the cluster through owner ref }, }, "/v1, Kind=Secret, ns2/cluster1-kubeconfig": { @@ -1220,7 +1214,7 @@ func TestObjectGraph_DiscoveryByNamespace(t *testing.T) { }, "/v1, Kind=Secret, ns1/cluster1-ca": { softOwners: []string{ - "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", //NB. this secret is not linked to the cluster through owner ref + "cluster.x-k8s.io/v1alpha4, Kind=Cluster, ns1/cluster1", // NB. this secret is not linked to the cluster through owner ref }, }, "/v1, Kind=Secret, ns1/cluster1-kubeconfig": { diff --git a/cmd/clusterctl/client/cluster/proxy.go b/cmd/clusterctl/client/cluster/proxy.go index 11321e390632..5c1847e7ea53 100644 --- a/cmd/clusterctl/client/cluster/proxy.go +++ b/cmd/clusterctl/client/cluster/proxy.go @@ -36,7 +36,7 @@ import ( ) var ( - Scheme = scheme.Scheme + localScheme = scheme.Scheme ) type proxy struct { @@ -138,7 +138,7 @@ func (k *proxy) NewClient() (client.Client, error) { connectBackoff := newConnectBackoff() if err := retryWithExponentialBackoff(connectBackoff, func() error { var err error - c, err = client.New(config, client.Options{Scheme: Scheme}) + c, err = client.New(config, client.Options{Scheme: localScheme}) if err != nil { return err } diff --git a/cmd/clusterctl/client/config/providers_client.go b/cmd/clusterctl/client/config/providers_client.go index acdb295b908e..1856a16690f5 100644 --- a/cmd/clusterctl/client/config/providers_client.go +++ b/cmd/clusterctl/client/config/providers_client.go @@ -26,11 +26,13 @@ import ( clusterctlv1 
"sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" ) +// Core providers. const ( - // Core providers. ClusterAPIProviderName = "cluster-api" +) - // Infra providers. +// Infra providers. +const ( AWSProviderName = "aws" AzureProviderName = "azure" DockerProviderName = "docker" @@ -41,18 +43,24 @@ const ( PacketProviderName = "packet" SideroProviderName = "sidero" VSphereProviderName = "vsphere" +) - // Bootstrap providers. +// Bootstrap providers. +const ( KubeadmBootstrapProviderName = "kubeadm" TalosBootstrapProviderName = "talos" AWSEKSBootstrapProviderName = "aws-eks" +) - // ControlPlane providers. +// ControlPlane providers. +const ( KubeadmControlPlaneProviderName = "kubeadm" TalosControlPlaneProviderName = "talos" AWSEKSControlPlaneProviderName = "aws-eks" +) - // Other. +// Other. +const ( ProvidersConfigKey = "providers" ) @@ -238,7 +246,7 @@ func (p *providersClient) Get(name string, providerType clusterctlv1.ProviderTyp return nil, err } - provider := NewProvider(name, "", providerType) //Nb. Having the url empty is fine because the url is not considered by SameAs. + provider := NewProvider(name, "", providerType) // NB. Having the url empty is fine because the url is not considered by SameAs. for _, r := range l { if r.SameAs(provider) { return r, nil diff --git a/cmd/clusterctl/client/config/providers_client_test.go b/cmd/clusterctl/client/config/providers_client_test.go index da1464acebd7..1b5d0f3e3f44 100644 --- a/cmd/clusterctl/client/config/providers_client_test.go +++ b/cmd/clusterctl/client/config/providers_client_test.go @@ -107,7 +107,7 @@ func Test_providers_List(t *testing.T) { configGetter: test.NewFakeReader(). 
WithVar( ProvidersConfigKey, - "- name: \"\"\n"+ //name must not be empty + "- name: \"\"\n"+ // name must not be empty " url: \"\"\n"+ " type: \"\"\n", ), diff --git a/cmd/clusterctl/client/config/reader_viper.go b/cmd/clusterctl/client/config/reader_viper.go index 8cf0235e19a0..bf02c93fe8c2 100644 --- a/cmd/clusterctl/client/config/reader_viper.go +++ b/cmd/clusterctl/client/config/reader_viper.go @@ -49,7 +49,7 @@ type viperReader struct { type viperReaderOption func(*viperReader) -func InjectConfigPaths(configPaths []string) viperReaderOption { +func injectConfigPaths(configPaths []string) viperReaderOption { return func(vr *viperReader) { vr.configPaths = configPaths } diff --git a/cmd/clusterctl/client/config/reader_viper_test.go b/cmd/clusterctl/client/config/reader_viper_test.go index 4cc834b278d8..d71a2925ea51 100644 --- a/cmd/clusterctl/client/config/reader_viper_test.go +++ b/cmd/clusterctl/client/config/reader_viper_test.go @@ -108,7 +108,7 @@ func Test_viperReader_Init(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { gg := NewWithT(t) - v := newViperReader(InjectConfigPaths(tt.configDirs)) + v := newViperReader(injectConfigPaths(tt.configDirs)) if tt.expectErr { gg.Expect(v.Init(tt.configPath)).ToNot(Succeed()) return @@ -168,7 +168,7 @@ func Test_viperReader_Get(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) - v := newViperReader(InjectConfigPaths([]string{dir})) + v := newViperReader(injectConfigPaths([]string{dir})) gs.Expect(v.Init(configFile)).To(Succeed()) @@ -192,7 +192,7 @@ func Test_viperReader_GetWithoutDefaultConfig(t *testing.T) { os.Setenv("FOO_FOO", "bar") - v := newViperReader(InjectConfigPaths([]string{dir})) + v := newViperReader(injectConfigPaths([]string{dir})) g.Expect(v.Init("")).To(Succeed()) got, err := v.Get("FOO_FOO") diff --git a/cmd/clusterctl/client/get_kubeconfig.go b/cmd/clusterctl/client/get_kubeconfig.go index 40a632be4d25..7f33402d7d8f 100644 --- 
a/cmd/clusterctl/client/get_kubeconfig.go +++ b/cmd/clusterctl/client/get_kubeconfig.go @@ -20,7 +20,7 @@ import ( "github.com/pkg/errors" ) -//GetKubeconfigOptions carries all the options supported by GetKubeconfig. +// GetKubeconfigOptions carries all the options supported by GetKubeconfig. type GetKubeconfigOptions struct { // Kubeconfig defines the kubeconfig to use for accessing the management cluster. If empty, // default rules for kubeconfig discovery will be used. diff --git a/cmd/clusterctl/client/repository/client.go b/cmd/clusterctl/client/repository/client.go index 2a342d836331..e9c2f403e8d3 100644 --- a/cmd/clusterctl/client/repository/client.go +++ b/cmd/clusterctl/client/repository/client.go @@ -150,7 +150,7 @@ type Repository interface { var _ Repository = &test.FakeRepository{} -//repositoryFactory returns the repository implementation corresponding to the provider URL. +// repositoryFactory returns the repository implementation corresponding to the provider URL. func repositoryFactory(providerConfig config.Provider, configVariablesClient config.VariablesClient) (Repository, error) { // parse the repository url rURL, err := url.Parse(providerConfig.URL()) diff --git a/cmd/clusterctl/client/repository/components.go b/cmd/clusterctl/client/repository/components.go index 719faa71647a..cdcb12f41207 100644 --- a/cmd/clusterctl/client/repository/components.go +++ b/cmd/clusterctl/client/repository/components.go @@ -44,12 +44,15 @@ const ( customResourceDefinitionKind = "CustomResourceDefinition" deploymentKind = "Deployment" - WebhookNamespaceName = "capi-webhook-system" - controllerContainerName = "manager" namespaceArgPrefix = "--namespace=" ) +const ( + // WebhookNamespaceName is the namespace used to deploy Cluster API webhooks. + WebhookNamespaceName = "capi-webhook-system" +) + // Components wraps a YAML file that defines the provider components // to be installed in a management cluster (CRD, Controller, RBAC etc.) 
// It is important to notice that clusterctl applies a set of processing steps to the “raw” component YAML read @@ -199,7 +202,7 @@ type ComponentsInput struct { // 4. Ensure all the ClusterRoleBinding which are referencing namespaced objects have the name prefixed with the namespace name // 5. Set the watching namespace for the provider controller // 6. Adds labels to all the components in order to allow easy identification of the provider objects. -func NewComponents(input ComponentsInput) (*components, error) { +func NewComponents(input ComponentsInput) (Components, error) { variables, err := input.Processor.GetVariables(input.RawYaml) if err != nil { return nil, err diff --git a/cmd/clusterctl/client/repository/components_test.go b/cmd/clusterctl/client/repository/components_test.go index 5ba4ad3305b5..91f7e17a69c9 100644 --- a/cmd/clusterctl/client/repository/components_test.go +++ b/cmd/clusterctl/client/repository/components_test.go @@ -189,7 +189,7 @@ func Test_fixTargetNamespace(t *testing.T) { g := NewWithT(t) got := fixTargetNamespace(tt.args.objs, tt.args.targetNamespace) - g.Expect(got).To(ContainElements(tt.want)) //skipping from test the automatically added namespace Object + g.Expect(got).To(ContainElements(tt.want)) // skipping from test the automatically added namespace Object }) } } diff --git a/cmd/clusterctl/client/repository/metadata_client_test.go b/cmd/clusterctl/client/repository/metadata_client_test.go index d136be0038e3..c7070b0b596f 100644 --- a/cmd/clusterctl/client/repository/metadata_client_test.go +++ b/cmd/clusterctl/client/repository/metadata_client_test.go @@ -73,7 +73,7 @@ func Test_metadataClient_Get(t *testing.T) { fields: fields{ provider: config.NewProvider("p1", "", clusterctlv1.CoreProviderType), version: "v1.0.0", - repository: test.NewFakeRepository(). //repository without a metadata file + repository: test.NewFakeRepository(). // repository without a metadata file WithPaths("root", ""). 
WithDefaultVersion("v1.0.0"), }, diff --git a/cmd/clusterctl/client/repository/repository_github.go b/cmd/clusterctl/client/repository/repository_github.go index 3f66d6738afe..e3543a0b60f6 100644 --- a/cmd/clusterctl/client/repository/repository_github.go +++ b/cmd/clusterctl/client/repository/repository_github.go @@ -336,7 +336,7 @@ func (g *gitHubRepository) downloadFilesFromRelease(release *github.RepositoryRe return nil, g.handleGithubErr(err, "failed to download file %q from %q release", *release.TagName, fileName) } if redirect != "" { - response, err := http.Get(redirect) //nolint:bodyclose // (NB: The reader is actually closed in a defer) + response, err := http.Get(redirect) //nolint:bodyclose,gosec // (NB: The reader is actually closed in a defer) if err != nil { return nil, errors.Wrapf(err, "failed to download file %q from %q release via redirect location %q", *release.TagName, fileName, redirect) } diff --git a/cmd/clusterctl/client/repository/repository_github_test.go b/cmd/clusterctl/client/repository/repository_github_test.go index f29224321a64..49aa28c37a8a 100644 --- a/cmd/clusterctl/client/repository/repository_github_test.go +++ b/cmd/clusterctl/client/repository/repository_github_test.go @@ -301,7 +301,7 @@ func Test_gitHubRepository_getLatestRelease(t *testing.T) { // setup an handler for returning no releases mux.HandleFunc("/repos/o/r2/releases", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "GET") - //no releases + // no releases }) // setup an handler for returning fake prereleases only @@ -439,7 +439,7 @@ func Test_gitHubRepository_downloadFilesFromRelease(t *testing.T) { client, mux, teardown := test.NewFakeGitHub() defer teardown() - providerConfig := config.NewProvider("test", "https://github.com/o/r/releases/v0.4.1/file.yaml", clusterctlv1.CoreProviderType) //tree/master/path not relevant for the test + providerConfig := config.NewProvider("test", "https://github.com/o/r/releases/v0.4.1/file.yaml", 
clusterctlv1.CoreProviderType) // tree/master/path not relevant for the test // test.NewFakeGitHub an handler for returning a fake release asset mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) { @@ -506,7 +506,7 @@ func Test_gitHubRepository_downloadFilesFromRelease(t *testing.T) { TagName: &tagName, Assets: []*github.ReleaseAsset{ { - ID: &id2, //id does not match any file (this should not happen) + ID: &id2, // id does not match any file (this should not happen) Name: &file, }, }, diff --git a/cmd/clusterctl/client/repository/repository_local.go b/cmd/clusterctl/client/repository/repository_local.go index ea5c134341a1..bd053ad2397e 100644 --- a/cmd/clusterctl/client/repository/repository_local.go +++ b/cmd/clusterctl/client/repository/repository_local.go @@ -28,6 +28,10 @@ import ( "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" ) +const ( + latestVersionTag = "latest" +) + // localRepository provides support for providers located on the local filesystem. // As part of the provider object, the URL is expected to contain the absolute // path to the components yaml on the local filesystem. @@ -83,7 +87,7 @@ func (r *localRepository) ComponentsPath() string { func (r *localRepository) GetFile(version, fileName string) ([]byte, error) { var err error - if version == "latest" { + if version == latestVersionTag { version, err = r.getLatestRelease() if err != nil { return nil, errors.Wrapf(err, "failed to get the latest release") @@ -162,7 +166,7 @@ func newLocalRepository(providerConfig config.Provider, configVariablesClient co componentsPath := urlSplit[len(urlSplit)-1] defaultVersion := urlSplit[len(urlSplit)-2] - if defaultVersion != "latest" { + if defaultVersion != latestVersionTag { _, err = version.ParseSemantic(defaultVersion) if err != nil { return nil, errors.Errorf("invalid version: %q. 
Version must obey the syntax and semantics of the \"Semantic Versioning\" specification (http://semver.org/) and path format {basepath}/{provider-name}/{version}/{components.yaml}", defaultVersion) @@ -187,7 +191,7 @@ func newLocalRepository(providerConfig config.Provider, configVariablesClient co componentsPath: componentsPath, } - if defaultVersion == "latest" { + if defaultVersion == latestVersionTag { repo.defaultVersion, err = repo.getLatestRelease() if err != nil { return nil, errors.Wrap(err, "failed to get latest version") diff --git a/cmd/clusterctl/client/repository/template.go b/cmd/clusterctl/client/repository/template.go index 0e4c6e85743e..d5488263df4a 100644 --- a/cmd/clusterctl/client/repository/template.go +++ b/cmd/clusterctl/client/repository/template.go @@ -79,7 +79,7 @@ type TemplateInput struct { } // NewTemplate returns a new objects embedding a cluster template YAML file. -func NewTemplate(input TemplateInput) (*template, error) { +func NewTemplate(input TemplateInput) (Template, error) { variables, err := input.Processor.GetVariables(input.RawArtifact) if err != nil { return nil, err diff --git a/cmd/clusterctl/client/tree/discovery.go b/cmd/clusterctl/client/tree/discovery.go index 97b55acd8e6a..4e5b14e9e0a0 100644 --- a/cmd/clusterctl/client/tree/discovery.go +++ b/cmd/clusterctl/client/tree/discovery.go @@ -41,11 +41,7 @@ type DiscoverOptions struct { } func (d DiscoverOptions) toObjectTreeOptions() ObjectTreeOptions { - return ObjectTreeOptions{ - ShowOtherConditions: d.ShowOtherConditions, - DisableNoEcho: d.DisableNoEcho, - DisableGrouping: d.DisableGrouping, - } + return ObjectTreeOptions(d) } // Discovery returns an object tree representing the status of a Cluster API cluster. 
diff --git a/cmd/clusterctl/client/tree/tree_test.go b/cmd/clusterctl/client/tree/tree_test.go index c90e8cc9f10e..ca043944f070 100644 --- a/cmd/clusterctl/client/tree/tree_test.go +++ b/cmd/clusterctl/client/tree/tree_test.go @@ -707,7 +707,7 @@ func Test_Add_Grouping(t *testing.T) { type clusterOption func(*clusterv1.Cluster) -func fakeCluster(name string, options ...clusterOption) *clusterv1.Cluster { // nolint:unparam +func fakeCluster(name string, options ...clusterOption) *clusterv1.Cluster { c := &clusterv1.Cluster{ TypeMeta: metav1.TypeMeta{ Kind: "Cluster", diff --git a/cmd/clusterctl/cmd/config_repositories.go b/cmd/clusterctl/cmd/config_repositories.go index 792b283a6863..efa599270823 100644 --- a/cmd/clusterctl/cmd/config_repositories.go +++ b/cmd/clusterctl/cmd/config_repositories.go @@ -108,7 +108,7 @@ func runGetRepositories(cfgFile string, out io.Writer) error { if err != nil { return err } - fmt.Fprintf(w, string(y)) + fmt.Fprint(w, string(y)) } w.Flush() return nil diff --git a/cmd/clusterctl/cmd/generate_cluster.go b/cmd/clusterctl/cmd/generate_cluster.go index a8e40c458d38..ce4e44eb9840 100644 --- a/cmd/clusterctl/cmd/generate_cluster.go +++ b/cmd/clusterctl/cmd/generate_cluster.go @@ -18,6 +18,7 @@ package cmd import ( "fmt" + "github.com/spf13/cobra" "sigs.k8s.io/cluster-api/cmd/clusterctl/client" ) diff --git a/cmd/clusterctl/cmd/rollout/restart.go b/cmd/clusterctl/cmd/rollout/restart.go index 0582432a3d3f..436fef002771 100644 --- a/cmd/clusterctl/cmd/rollout/restart.go +++ b/cmd/clusterctl/cmd/rollout/restart.go @@ -64,7 +64,7 @@ func NewCmdRolloutRestart(cfgFile string) *cobra.Command { return cmd } -func runRestart(cfgFile string, cmd *cobra.Command, args []string) error { +func runRestart(cfgFile string, _ *cobra.Command, args []string) error { restartOpt.resources = args c, err := client.New(cfgFile) diff --git a/cmd/clusterctl/cmd/root.go b/cmd/clusterctl/cmd/root.go index 9da0e18c24d2..0022544f1b1e 100644 --- 
a/cmd/clusterctl/cmd/root.go +++ b/cmd/clusterctl/cmd/root.go @@ -41,6 +41,7 @@ var ( verbosity *int ) +// RootCmd is clusterctl root CLI command. var RootCmd = &cobra.Command{ Use: "clusterctl", SilenceUsage: true, diff --git a/cmd/clusterctl/config/embedded_manifest.go b/cmd/clusterctl/config/embedded_manifest.go index f278d741ebf2..f43f781489d1 100644 --- a/cmd/clusterctl/config/embedded_manifest.go +++ b/cmd/clusterctl/config/embedded_manifest.go @@ -19,5 +19,7 @@ package config import _ "embed" +// ClusterctlAPIManifest contains the clusterctl manifests in raw bytes format. + //go:embed manifest/clusterctl-api.yaml var ClusterctlAPIManifest []byte diff --git a/cmd/clusterctl/internal/test/fake_objects.go b/cmd/clusterctl/internal/test/fake_objects.go index 0bb322bdc976..2b7489ef5e23 100644 --- a/cmd/clusterctl/internal/test/fake_objects.go +++ b/cmd/clusterctl/internal/test/fake_objects.go @@ -1183,12 +1183,12 @@ func FakeCustomResourceDefinition(group string, kind string, versions ...string) APIVersion: "CustomResourceDefinition", }, ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s.%s", strings.ToLower(kind), group), //NB. this technically should use plural(kind), but for the sake of test what really matters is to generate a unique name + Name: fmt.Sprintf("%s.%s", strings.ToLower(kind), group), // NB. this technically should use plural(kind), but for the sake of test what really matters is to generate a unique name Labels: map[string]string{ clusterctlv1.ClusterctlLabelName: "", }, }, - Spec: apiextensionslv1.CustomResourceDefinitionSpec{ //NB. the spec contains only what is strictly required by the move test + Spec: apiextensionslv1.CustomResourceDefinitionSpec{ // NB.
the spec contains only what is strictly required by the move test Group: group, Names: apiextensionslv1.CustomResourceDefinitionNames{ Kind: kind, diff --git a/cmd/clusterctl/internal/test/fake_proxy.go b/cmd/clusterctl/internal/test/fake_proxy.go index 35eb151ad80b..014dfe72b483 100644 --- a/cmd/clusterctl/internal/test/fake_proxy.go +++ b/cmd/clusterctl/internal/test/fake_proxy.go @@ -42,7 +42,7 @@ type FakeProxy struct { } var ( - FakeScheme = runtime.NewScheme() + FakeScheme = runtime.NewScheme() //nolint:golint ) func init() { diff --git a/cmd/clusterctl/internal/test/providers/controlplane/generic_types.go b/cmd/clusterctl/internal/test/providers/controlplane/generic_types.go index 41c5b5983a91..f4c8d80a76fe 100644 --- a/cmd/clusterctl/internal/test/providers/controlplane/generic_types.go +++ b/cmd/clusterctl/internal/test/providers/controlplane/generic_types.go @@ -21,12 +21,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// GenericControlPlaneSpec contains a generic control plane spec. type GenericControlPlaneSpec struct { InfrastructureTemplate corev1.ObjectReference `json:"infrastructureTemplate"` } // +kubebuilder:object:root=true +// GenericControlPlane is a generic representation of a control plane. type GenericControlPlane struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -35,6 +37,7 @@ type GenericControlPlane struct { // +kubebuilder:object:root=true +// GenericControlPlaneList is list of generic control planes. type GenericControlPlaneList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` diff --git a/cmd/clusterctl/log/logger.go b/cmd/clusterctl/log/logger.go index a0b0f9112ff7..7629ceaf3db0 100644 --- a/cmd/clusterctl/log/logger.go +++ b/cmd/clusterctl/log/logger.go @@ -94,7 +94,7 @@ func (l *logger) Error(err error, msg string, kvs ...interface{}) { } // V returns an InfoLogger value for a specific verbosity level. 
-func (l *logger) V(level int) logr.InfoLogger { +func (l *logger) V(level int) logr.Logger { nl := l.clone() nl.level = level return nl diff --git a/controllers/external/testing.go b/controllers/external/testing.go index 98d9205c7479..5b0f051abe0f 100644 --- a/controllers/external/testing.go +++ b/controllers/external/testing.go @@ -24,6 +24,7 @@ import ( ) var ( + // TestGenericBootstrapCRD is a generic bootstrap CRD. TestGenericBootstrapCRD = &apiextensionsv1.CustomResourceDefinition{ TypeMeta: metav1.TypeMeta{ APIVersion: apiextensionsv1.SchemeGroupVersion.String(), @@ -70,6 +71,7 @@ var ( }, } + // TestGenericBootstrapTemplateCRD is a generic bootstrap template CRD. TestGenericBootstrapTemplateCRD = &apiextensionsv1.CustomResourceDefinition{ TypeMeta: metav1.TypeMeta{ APIVersion: apiextensionsv1.SchemeGroupVersion.String(), @@ -116,6 +118,7 @@ var ( }, } + // TestGenericInfrastructureCRD is a generic infrastructure CRD. TestGenericInfrastructureCRD = &apiextensionsv1.CustomResourceDefinition{ TypeMeta: metav1.TypeMeta{ APIVersion: apiextensionsv1.SchemeGroupVersion.String(), @@ -162,6 +165,7 @@ var ( }, } + // TestGenericInfrastructureTemplateCRD is a generic infrastructure template CRD. TestGenericInfrastructureTemplateCRD = &apiextensionsv1.CustomResourceDefinition{ TypeMeta: metav1.TypeMeta{ APIVersion: apiextensionsv1.SchemeGroupVersion.String(), @@ -208,6 +212,7 @@ var ( }, } + // TestGenericInfrastructureRemediationCRD is a generic infrastructure remediation CRD. TestGenericInfrastructureRemediationCRD = &apiextensionsv1.CustomResourceDefinition{ TypeMeta: metav1.TypeMeta{ APIVersion: apiextensionsv1.SchemeGroupVersion.String(), @@ -254,6 +259,7 @@ var ( }, } + // TestGenericInfrastructureRemediationTemplateCRD is a generic infrastructure remediation template CRD.
TestGenericInfrastructureRemediationTemplateCRD = &apiextensionsv1.CustomResourceDefinition{ TypeMeta: metav1.TypeMeta{ APIVersion: apiextensionsv1.SchemeGroupVersion.String(), diff --git a/controllers/external/util.go b/controllers/external/util.go index 42b488fe427c..3dee74ec15a0 100644 --- a/controllers/external/util.go +++ b/controllers/external/util.go @@ -107,7 +107,7 @@ func CloneTemplate(ctx context.Context, in *CloneTemplateInput) (*corev1.ObjectR return GetObjectReference(to), nil } -// GenerateTemplate input is everything needed to generate a new template. +// GenerateTemplateInput is the input needed to generate a new template. type GenerateTemplateInput struct { // Template is the TemplateRef turned into an unstructured. // +required diff --git a/controllers/external/util_test.go b/controllers/external/util_test.go index ab1bca0f969b..476a1100c693 100644 --- a/controllers/external/util_test.go +++ b/controllers/external/util_test.go @@ -37,10 +37,14 @@ var ( ctx = ctrl.SetupSignalHandler() ) +const ( + testNamespace = "test" + testClusterName = "test-cluster" +) + func TestGetResourceFound(t *testing.T) { g := NewWithT(t) - namespace := "test" testResourceName := "greenTemplate" testResourceKind := "GreenTemplate" testResourceAPIVersion := "green.io/v1" @@ -50,18 +54,18 @@ func TestGetResourceFound(t *testing.T) { testResource.SetKind(testResourceKind) testResource.SetAPIVersion(testResourceAPIVersion) testResource.SetName(testResourceName) - testResource.SetNamespace(namespace) + testResource.SetNamespace(testNamespace) testResource.SetResourceVersion(testResourceVersion) testResourceReference := &corev1.ObjectReference{ Kind: testResourceKind, APIVersion: testResourceAPIVersion, Name: testResourceName, - Namespace: namespace, + Namespace: testNamespace, } fakeClient := fake.NewClientBuilder().WithScheme(runtime.NewScheme()).WithObjects(testResource.DeepCopy()).Build() - got, err := Get(ctx, fakeClient, testResourceReference, namespace) + got, err := 
Get(ctx, fakeClient, testResourceReference, testNamespace) g.Expect(err).NotTo(HaveOccurred()) g.Expect(got).To(Equal(testResource)) } @@ -87,21 +91,20 @@ func TestGetResourceNotFound(t *testing.T) { func TestCloneTemplateResourceNotFound(t *testing.T) { g := NewWithT(t) - namespace := "test" testClusterName := "bar" testResourceReference := &corev1.ObjectReference{ Kind: "OrangeTemplate", APIVersion: "orange.io/v1", Name: "orangeTemplate", - Namespace: namespace, + Namespace: testNamespace, } fakeClient := fake.NewClientBuilder().WithScheme(runtime.NewScheme()).Build() _, err := CloneTemplate(ctx, &CloneTemplateInput{ Client: fakeClient, TemplateRef: testResourceReference, - Namespace: namespace, + Namespace: testNamespace, ClusterName: testClusterName, }) g.Expect(err).To(HaveOccurred()) @@ -111,9 +114,6 @@ func TestCloneTemplateResourceNotFound(t *testing.T) { func TestCloneTemplateResourceFound(t *testing.T) { g := NewWithT(t) - namespace := "test" - testClusterName := "test-cluster" - templateName := "purpleTemplate" templateKind := "PurpleTemplate" templateAPIVersion := "purple.io/v1" @@ -124,7 +124,7 @@ func TestCloneTemplateResourceFound(t *testing.T) { "apiVersion": templateAPIVersion, "metadata": map[string]interface{}{ "name": templateName, - "namespace": namespace, + "namespace": testNamespace, }, "spec": map[string]interface{}{ "template": map[string]interface{}{ @@ -150,7 +150,7 @@ func TestCloneTemplateResourceFound(t *testing.T) { Kind: templateKind, APIVersion: templateAPIVersion, Name: templateName, - Namespace: namespace, + Namespace: testNamespace, } owner := metav1.OwnerReference{ @@ -176,7 +176,7 @@ func TestCloneTemplateResourceFound(t *testing.T) { ref, err := CloneTemplate(ctx, &CloneTemplateInput{ Client: fakeClient, TemplateRef: templateRef.DeepCopy(), - Namespace: namespace, + Namespace: testNamespace, ClusterName: testClusterName, OwnerRef: owner.DeepCopy(), Labels: map[string]string{ @@ -192,7 +192,7 @@ func 
TestCloneTemplateResourceFound(t *testing.T) { g.Expect(ref).NotTo(BeNil()) g.Expect(ref.Kind).To(Equal(expectedKind)) g.Expect(ref.APIVersion).To(Equal(expectedAPIVersion)) - g.Expect(ref.Namespace).To(Equal(namespace)) + g.Expect(ref.Namespace).To(Equal(testNamespace)) g.Expect(ref.Name).To(HavePrefix(templateRef.Name)) clone := &unstructured.Unstructured{} @@ -225,9 +225,6 @@ func TestCloneTemplateResourceFound(t *testing.T) { func TestCloneTemplateResourceFoundNoOwner(t *testing.T) { g := NewWithT(t) - namespace := "test" - testClusterName := "test-cluster" - templateName := "yellowTemplate" templateKind := "YellowTemplate" templateAPIVersion := "yellow.io/v1" @@ -238,7 +235,7 @@ func TestCloneTemplateResourceFoundNoOwner(t *testing.T) { "apiVersion": templateAPIVersion, "metadata": map[string]interface{}{ "name": templateName, - "namespace": namespace, + "namespace": testNamespace, }, "spec": map[string]interface{}{ "template": map[string]interface{}{ @@ -254,7 +251,7 @@ func TestCloneTemplateResourceFoundNoOwner(t *testing.T) { Kind: templateKind, APIVersion: templateAPIVersion, Name: templateName, - Namespace: namespace, + Namespace: testNamespace, } expectedKind := "Yellow" @@ -271,14 +268,14 @@ func TestCloneTemplateResourceFoundNoOwner(t *testing.T) { ref, err := CloneTemplate(ctx, &CloneTemplateInput{ Client: fakeClient, TemplateRef: templateRef, - Namespace: namespace, + Namespace: testNamespace, ClusterName: testClusterName, }) g.Expect(err).NotTo(HaveOccurred()) g.Expect(ref).NotTo(BeNil()) g.Expect(ref.Kind).To(Equal(expectedKind)) g.Expect(ref.APIVersion).To(Equal(expectedAPIVersion)) - g.Expect(ref.Namespace).To(Equal(namespace)) + g.Expect(ref.Namespace).To(Equal(testNamespace)) g.Expect(ref.Name).To(HavePrefix(templateRef.Name)) clone := &unstructured.Unstructured{} @@ -297,9 +294,6 @@ func TestCloneTemplateResourceFoundNoOwner(t *testing.T) { func TestCloneTemplateMissingSpecTemplate(t *testing.T) { g := NewWithT(t) - namespace := "test" - 
testClusterName := "test-cluster" - templateName := "aquaTemplate" templateKind := "AquaTemplate" templateAPIVersion := "aqua.io/v1" @@ -310,7 +304,7 @@ func TestCloneTemplateMissingSpecTemplate(t *testing.T) { "apiVersion": templateAPIVersion, "metadata": map[string]interface{}{ "name": templateName, - "namespace": namespace, + "namespace": testNamespace, }, "spec": map[string]interface{}{}, }, @@ -320,7 +314,7 @@ func TestCloneTemplateMissingSpecTemplate(t *testing.T) { Kind: templateKind, APIVersion: templateAPIVersion, Name: templateName, - Namespace: namespace, + Namespace: testNamespace, } fakeClient := fake.NewClientBuilder().WithScheme(runtime.NewScheme()).WithObjects(template.DeepCopy()).Build() @@ -328,7 +322,7 @@ func TestCloneTemplateMissingSpecTemplate(t *testing.T) { _, err := CloneTemplate(ctx, &CloneTemplateInput{ Client: fakeClient, TemplateRef: templateRef, - Namespace: namespace, + Namespace: testNamespace, ClusterName: testClusterName, }) g.Expect(err).To(HaveOccurred()) diff --git a/controllers/machine_controller_noderef.go b/controllers/machine_controller_noderef.go index 179d85944e22..e330ee16dbe3 100644 --- a/controllers/machine_controller_noderef.go +++ b/controllers/machine_controller_noderef.go @@ -35,6 +35,7 @@ import ( ) var ( + // ErrNodeNotFound signals that a corev1.Node could not be found for the given provider id. 
ErrNodeNotFound = errors.New("cannot find node with matching ProviderID") ) diff --git a/controllers/machinedeployment_rollout_ondelete.go b/controllers/machinedeployment_rollout_ondelete.go index 217b36dcf54c..3363414453b1 100644 --- a/controllers/machinedeployment_rollout_ondelete.go +++ b/controllers/machinedeployment_rollout_ondelete.go @@ -19,6 +19,7 @@ package controllers import ( "context" "fmt" + "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" @@ -158,7 +159,7 @@ func (r *MachineDeploymentReconciler) reconcileOldMachineSetsOnDelete(ctx contex return nil } -//reconcileNewMachineSetOnDelete handles reconciliation of the latest MachineSet associated with the MachineDeployment in the OnDelete MachineDeploymentStrategyType. +// reconcileNewMachineSetOnDelete handles reconciliation of the latest MachineSet associated with the MachineDeployment in the OnDelete MachineDeploymentStrategyType. func (r *MachineDeploymentReconciler) reconcileNewMachineSetOnDelete(ctx context.Context, allMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet, deployment *clusterv1.MachineDeployment) error { // logic same as reconcile logic for RollingUpdate log := ctrl.LoggerFrom(ctx) diff --git a/controllers/machinehealthcheck_controller_test.go b/controllers/machinehealthcheck_controller_test.go index 920aef54ac55..2c3e0ab1a490 100644 --- a/controllers/machinehealthcheck_controller_test.go +++ b/controllers/machinehealthcheck_controller_test.go @@ -48,7 +48,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -const defaultNamespaceName = "default" +const ( + defaultNamespaceName = "default" + testClusterName = "test-cluster" +) func TestMachineHealthCheck_Reconcile(t *testing.T) { t.Run("it should ensure the correct cluster-name label when no existing labels exist", func(t *testing.T) { @@ -1801,7 +1804,7 @@ func TestClusterToMachineHealthCheck(t *testing.T) { } namespace := defaultNamespaceName - 
clusterName := "test-cluster" + clusterName := testClusterName labels := make(map[string]string) mhc1 := newMachineHealthCheckWithLabels("mhc1", namespace, clusterName, labels) @@ -1881,7 +1884,7 @@ func TestMachineToMachineHealthCheck(t *testing.T) { } namespace := defaultNamespaceName - clusterName := "test-cluster" + clusterName := testClusterName nodeName := "node1" labels := map[string]string{"cluster": "foo", "nodepool": "bar"} @@ -1957,7 +1960,7 @@ func TestNodeToMachineHealthCheck(t *testing.T) { } namespace := defaultNamespaceName - clusterName := "test-cluster" + clusterName := testClusterName nodeName := "node1" labels := map[string]string{"cluster": "foo", "nodepool": "bar"} @@ -2568,7 +2571,7 @@ func TestPatchTargets(t *testing.T) { g := NewWithT(t) namespace := defaultNamespaceName - clusterName := "test-cluster" + clusterName := testClusterName defaultCluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: clusterName, diff --git a/controllers/mdutil/util.go b/controllers/mdutil/util.go index 2bdd47002402..3afd7ae155c8 100644 --- a/controllers/mdutil/util.go +++ b/controllers/mdutil/util.go @@ -39,6 +39,8 @@ import ( ) const ( + // DefaultMachineDeploymentUniqueLabelKey is the label applied to Machines + // in a MachineDeployment containing the hash of the template. DefaultMachineDeploymentUniqueLabelKey = "machine-template-hash" // FailedMSCreateReason is added in a machine deployment when it cannot create a new machine set. @@ -49,9 +51,6 @@ const ( // estimated once a deployment is paused. PausedDeployReason = "DeploymentPaused" - // - // Available: - // // MinimumReplicasAvailable is added in a deployment when it has its minimum replicas required available. 
MinimumReplicasAvailable = "MinimumReplicasAvailable" // MinimumReplicasUnavailable is added in a deployment when it doesn't have the minimum required replicas @@ -224,7 +223,7 @@ func SetNewMachineSetAnnotations(deployment *clusterv1.MachineDeployment, newMS logger.Error(err, "Updating machine set revision OldRevision not int") return false } - //If the MS annotation is empty then initialise it to 0 + // If the MS annotation is empty then initialise it to 0 oldRevisionInt = 0 } newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64) @@ -301,7 +300,7 @@ func SetReplicasAnnotations(ms *clusterv1.MachineSet, desiredReplicas, maxReplic return updated } -// AnnotationsNeedUpdate return true if ReplicasAnnotations need to be updated. +// ReplicasAnnotationsNeedUpdate return true if the replicas annotation needs to be updated. func ReplicasAnnotationsNeedUpdate(ms *clusterv1.MachineSet, desiredReplicas, maxReplicas int32) bool { if ms.Annotations == nil { return true @@ -635,7 +634,7 @@ func FilterMachineSets(mSes []*clusterv1.MachineSet, filterFn filterMS) []*clust return filtered } -// Clones the given map and returns a new map with the given key and value added. +// CloneAndAddLabel clones the given map and returns a new map with the given key and value added. // Returns the given map, if labelKey is empty. func CloneAndAddLabel(labels map[string]string, labelKey, labelValue string) map[string]string { if labelKey == "" { @@ -651,7 +650,7 @@ func CloneAndAddLabel(labels map[string]string, labelKey, labelValue string) map return newLabels } -// Clones the given selector and returns a new selector with the given key and value added. +// CloneSelectorAndAddLabel clones the given selector and returns a new selector with the given key and value added. // Returns the given selector, if labelKey is empty. 
func CloneSelectorAndAddLabel(selector *metav1.LabelSelector, labelKey, labelValue string) *metav1.LabelSelector { if labelKey == "" { @@ -705,6 +704,7 @@ func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) { printer.Fprintf(hasher, "%#v", objectToWrite) } +// ComputeHash computes the hash of a MachineTemplateSpec. func ComputeHash(template *clusterv1.MachineTemplateSpec) uint32 { machineTemplateSpecHasher := fnv.New32a() DeepHashObject(machineTemplateSpecHasher, *template) diff --git a/controllers/mdutil/util_test.go b/controllers/mdutil/util_test.go index d23829002480..075671585383 100644 --- a/controllers/mdutil/util_test.go +++ b/controllers/mdutil/util_test.go @@ -252,7 +252,7 @@ func TestEqualMachineTemplate(t *testing.T) { t.Run(test.Name, func(t *testing.T) { g := NewWithT(t) - runTest := func(t1, t2 *clusterv1.MachineTemplateSpec, reversed bool) { + runTest := func(t1, t2 *clusterv1.MachineTemplateSpec) { // Run equal := EqualMachineTemplate(t1, t2) g.Expect(equal).To(Equal(test.Expected)) @@ -260,9 +260,9 @@ func TestEqualMachineTemplate(t *testing.T) { g.Expect(t2.Labels).NotTo(BeNil()) } - runTest(&test.Former, &test.Latter, false) + runTest(&test.Former, &test.Latter) // Test the same case in reverse order - runTest(&test.Latter, &test.Former, true) + runTest(&test.Latter, &test.Former) }) } } @@ -712,28 +712,28 @@ func TestMaxUnavailable(t *testing.T) { } } -//Set of simple tests for annotation related util functions. +// TestAnnotationUtils is a set of simple tests for annotation related util functions.
func TestAnnotationUtils(t *testing.T) { - //Setup + // Setup tDeployment := generateDeployment("nginx") tMS := generateMS(tDeployment) tDeployment.Annotations[clusterv1.RevisionAnnotation] = "1" logger := klogr.New() - //Test Case 1: Check if anotations are copied properly from deployment to MS + // Test Case 1: Check if annotations are copied properly from deployment to MS t.Run("SetNewMachineSetAnnotations", func(t *testing.T) { g := NewWithT(t) - //Try to set the increment revision from 1 through 20 + // Try to set the increment revision from 1 through 20 for i := 0; i < 20; i++ { nextRevision := fmt.Sprintf("%d", i+1) SetNewMachineSetAnnotations(&tDeployment, &tMS, nextRevision, true, logger) - //Now the MachineSets Revision Annotation should be i+1 + // Now the MachineSets Revision Annotation should be i+1 g.Expect(tMS.Annotations).To(HaveKeyWithValue(clusterv1.RevisionAnnotation, nextRevision)) } }) - //Test Case 2: Check if annotations are set properly + // Test Case 2: Check if annotations are set properly t.Run("SetReplicasAnnotations", func(t *testing.T) { g := NewWithT(t) @@ -742,7 +742,7 @@ func TestAnnotationUtils(t *testing.T) { g.Expect(tMS.Annotations).To(HaveKeyWithValue(clusterv1.MaxReplicasAnnotation, "11")) }) - //Test Case 3: Check if annotations reflect deployments state + // Test Case 3: Check if annotations reflect deployments state tMS.Annotations[clusterv1.DesiredReplicasAnnotation] = "1" tMS.Status.AvailableReplicas = 1 tMS.Spec.Replicas = new(int32) @@ -753,7 +753,6 @@ func TestAnnotationUtils(t *testing.T) { g.Expect(IsSaturated(&tDeployment, &tMS)).To(BeTrue()) }) - //Tear Down } func TestReplicasAnnotationsNeedUpdate(t *testing.T) { diff --git a/controllers/noderefutil/providerid.go b/controllers/noderefutil/providerid.go index e1f9bb585974..385e0021fb47 100644 --- a/controllers/noderefutil/providerid.go +++ b/controllers/noderefutil/providerid.go @@ -24,7 +24,10 @@ import ( ) var ( - ErrEmptyProviderID = errors.New("providerID is
empty") + // ErrEmptyProviderID means that the provider id is empty. + ErrEmptyProviderID = errors.New("providerID is empty") + + // ErrInvalidProviderID means that the provider id has an invalid form. ErrInvalidProviderID = errors.New("providerID must be of the form :////") ) diff --git a/controllers/remote/cluster_cache.go b/controllers/remote/cluster_cache.go index 567cb5bc95ea..4c1f6ef3f9f7 100644 --- a/controllers/remote/cluster_cache.go +++ b/controllers/remote/cluster_cache.go @@ -46,7 +46,7 @@ const ( healthCheckPollInterval = 10 * time.Second healthCheckRequestTimeout = 5 * time.Second healthCheckUnhealthyThreshold = 10 - ClusterCacheControllerName = "cluster-cache-tracker" + clusterCacheControllerName = "cluster-cache-tracker" ) // ClusterCacheTracker manages client caches for workload clusters. @@ -119,7 +119,7 @@ func (t *ClusterCacheTracker) getClusterAccessorLH(ctx context.Context, cluster // newClusterAccessor creates a new clusterAccessor. func (t *ClusterCacheTracker) newClusterAccessor(ctx context.Context, cluster client.ObjectKey) (*clusterAccessor, error) { // Get a rest config for the remote cluster - config, err := RESTConfig(ctx, ClusterCacheControllerName, t.client, cluster) + config, err := RESTConfig(ctx, clusterCacheControllerName, t.client, cluster) if err != nil { return nil, errors.Wrapf(err, "error fetching REST client config for remote cluster %q", cluster.String()) } @@ -155,7 +155,7 @@ func (t *ClusterCacheTracker) newClusterAccessor(ctx context.Context, cluster cl } // Start the cache!!! - go cache.Start(cacheCtx) + go cache.Start(cacheCtx) //nolint:errcheck // Start cluster healthcheck!!! 
go t.healthCheckCluster(cacheCtx, &healthCheckInput{ diff --git a/controlplane/kubeadm/api/v1alpha3/condition_consts.go b/controlplane/kubeadm/api/v1alpha3/condition_consts.go index a5b3e4a61202..5178960bcfad 100644 --- a/controlplane/kubeadm/api/v1alpha3/condition_consts.go +++ b/controlplane/kubeadm/api/v1alpha3/condition_consts.go @@ -21,7 +21,7 @@ import clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" // Conditions and condition Reasons for the KubeadmControlPlane object const ( - // MachinesReady reports an aggregate of current status of the machines controlled by the KubeadmControlPlane. + // MachinesReadyCondition reports an aggregate of current status of the machines controlled by the KubeadmControlPlane. MachinesReadyCondition clusterv1.ConditionType = "MachinesReady" ) diff --git a/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types.go b/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types.go index e6bcc0152114..21f16f800699 100644 --- a/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types.go +++ b/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types.go @@ -26,8 +26,13 @@ import ( ) const ( + // KubeadmControlPlaneFinalizer is the finalizer applied to KubeadmControlPlane resources + // by its managing controller. KubeadmControlPlaneFinalizer = "kubeadm.controlplane.cluster.x-k8s.io" + // KubeadmControlPlaneHashLabelKey was used to determine the hash of the + // template used to generate a control plane machine. + // // Deprecated: This label has been deprecated and it's not in use anymore. 
KubeadmControlPlaneHashLabelKey = "kubeadm.controlplane.cluster.x-k8s.io/hash" diff --git a/controlplane/kubeadm/api/v1alpha4/condition_consts.go b/controlplane/kubeadm/api/v1alpha4/condition_consts.go index 4d4136dd1c5f..5bfe23541228 100644 --- a/controlplane/kubeadm/api/v1alpha4/condition_consts.go +++ b/controlplane/kubeadm/api/v1alpha4/condition_consts.go @@ -21,7 +21,7 @@ import clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" // Conditions and condition Reasons for the KubeadmControlPlane object const ( - // MachinesReady reports an aggregate of current status of the machines controlled by the KubeadmControlPlane. + // MachinesReadyCondition reports an aggregate of current status of the machines controlled by the KubeadmControlPlane. MachinesReadyCondition clusterv1.ConditionType = "MachinesReady" ) diff --git a/controlplane/kubeadm/api/v1alpha4/kubeadm_control_plane_types.go b/controlplane/kubeadm/api/v1alpha4/kubeadm_control_plane_types.go index 87e3a15bb202..51ded7d7ef62 100644 --- a/controlplane/kubeadm/api/v1alpha4/kubeadm_control_plane_types.go +++ b/controlplane/kubeadm/api/v1alpha4/kubeadm_control_plane_types.go @@ -28,12 +28,14 @@ import ( type RolloutStrategyType string const ( - // Replace the old control planes by new one using rolling update + // RollingUpdateStrategyType replaces the old control planes by new one using rolling update // i.e. gradually scale up or down the old control planes and scale up or down the new one. RollingUpdateStrategyType RolloutStrategyType = "RollingUpdate" ) const ( + // KubeadmControlPlaneFinalizer is the finalizer applied to KubeadmControlPlane resources + // by its managing controller. KubeadmControlPlaneFinalizer = "kubeadm.controlplane.cluster.x-k8s.io" // SkipCoreDNSAnnotation annotation explicitly skips reconciling CoreDNS if set. 
diff --git a/controlplane/kubeadm/api/v1alpha4/kubeadm_control_plane_webhook.go b/controlplane/kubeadm/api/v1alpha4/kubeadm_control_plane_webhook.go index 7e3da35dc6c5..de5549363ac2 100644 --- a/controlplane/kubeadm/api/v1alpha4/kubeadm_control_plane_webhook.go +++ b/controlplane/kubeadm/api/v1alpha4/kubeadm_control_plane_webhook.go @@ -371,7 +371,7 @@ func (in *KubeadmControlPlane) validateCoreDNSVersion(prev *KubeadmControlPlane) if in.Spec.KubeadmConfigSpec.ClusterConfiguration == nil || prev.Spec.KubeadmConfigSpec.ClusterConfiguration == nil { return allErrs } - //return if either current or target versions is empty + // return if either current or target versions is empty if prev.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag == "" || in.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag == "" { return allErrs } diff --git a/controlplane/kubeadm/controllers/controller.go b/controlplane/kubeadm/controllers/controller.go index 347ef36c85c5..3a7aee87eac1 100644 --- a/controlplane/kubeadm/controllers/controller.go +++ b/controlplane/kubeadm/controllers/controller.go @@ -19,15 +19,10 @@ package controllers import ( "context" "fmt" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4" - "sigs.k8s.io/cluster-api/feature" "time" - "sigs.k8s.io/cluster-api/util/collections" - "github.com/blang/semver" "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -39,8 +34,11 @@ import ( "sigs.k8s.io/cluster-api/controllers/remote" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha4" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4" + "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" 
"sigs.k8s.io/cluster-api/util/predicates" diff --git a/controlplane/kubeadm/controllers/controller_test.go b/controlplane/kubeadm/controllers/controller_test.go index ed8fef165ab8..1f5f4bf6ad6e 100644 --- a/controlplane/kubeadm/controllers/controller_test.go +++ b/controlplane/kubeadm/controllers/controller_test.go @@ -19,12 +19,13 @@ package controllers import ( "context" "fmt" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4" - "sigs.k8s.io/cluster-api/feature" "sync" "testing" "time" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4" + "sigs.k8s.io/cluster-api/feature" + "github.com/blang/semver" . "github.com/onsi/gomega" @@ -139,7 +140,7 @@ func TestReconcileReturnErrorWhenOwnerClusterIsMissing(t *testing.T) { g.Expect(err).NotTo(HaveOccurred()) g.Expect(result).To(Equal(ctrl.Result{})) - //calling reconcile should return error + // calling reconcile should return error g.Expect(testEnv.Delete(ctx, cluster)).To(Succeed()) g.Eventually(func() error { diff --git a/controlplane/kubeadm/controllers/fakes_test.go b/controlplane/kubeadm/controllers/fakes_test.go index 980ed366f474..ee38d580f331 100644 --- a/controlplane/kubeadm/controllers/fakes_test.go +++ b/controlplane/kubeadm/controllers/fakes_test.go @@ -18,6 +18,7 @@ package controllers import ( "context" + "github.com/blang/semver" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" diff --git a/controlplane/kubeadm/controllers/scale.go b/controlplane/kubeadm/controllers/scale.go index b5791639ba64..10229556464a 100644 --- a/controlplane/kubeadm/controllers/scale.go +++ b/controlplane/kubeadm/controllers/scale.go @@ -18,9 +18,10 @@ package controllers import ( "context" + "strings" + "github.com/blang/semver" "sigs.k8s.io/cluster-api/util/collections" - "strings" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" diff --git a/controlplane/kubeadm/controllers/status.go b/controlplane/kubeadm/controllers/status.go index 9c26d525807c..ee851ee35c0f 100644 --- 
a/controlplane/kubeadm/controllers/status.go +++ b/controlplane/kubeadm/controllers/status.go @@ -18,6 +18,7 @@ package controllers import ( "context" + "sigs.k8s.io/cluster-api/util/collections" "github.com/pkg/errors" diff --git a/controlplane/kubeadm/controllers/upgrade_test.go b/controlplane/kubeadm/controllers/upgrade_test.go index d09fca800247..36bdb49f785e 100644 --- a/controlplane/kubeadm/controllers/upgrade_test.go +++ b/controlplane/kubeadm/controllers/upgrade_test.go @@ -45,7 +45,6 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleUp(t *testing.T) { cluster, kcp, genericMachineTemplate := createClusterWithControlPlane() cluster.Spec.ControlPlaneEndpoint.Host = Host cluster.Spec.ControlPlaneEndpoint.Port = 6443 - //kcp.Spec.Version = Version kcp.Spec.KubeadmConfigSpec.ClusterConfiguration = nil kcp.Spec.Replicas = pointer.Int32Ptr(1) setKCPHealthy(kcp) @@ -135,7 +134,6 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleDown(t *testing.T) { cluster, kcp, tmpl := createClusterWithControlPlane() cluster.Spec.ControlPlaneEndpoint.Host = "nodomain.example.com1" cluster.Spec.ControlPlaneEndpoint.Port = 6443 - //kcp.Spec.Version = Version kcp.Spec.Replicas = pointer.Int32Ptr(3) kcp.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntVal = 0 setKCPHealthy(kcp) diff --git a/controlplane/kubeadm/internal/cluster.go b/controlplane/kubeadm/internal/cluster.go index 419bea8f9ceb..d9b57a904617 100644 --- a/controlplane/kubeadm/internal/cluster.go +++ b/controlplane/kubeadm/internal/cluster.go @@ -21,18 +21,16 @@ import ( "crypto/tls" "crypto/x509" "fmt" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4" "time" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" - "sigs.k8s.io/cluster-api/util/collections" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/controllers/remote" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4" + "sigs.k8s.io/cluster-api/util/collections" 
"sigs.k8s.io/cluster-api/util/secret" "sigs.k8s.io/controller-runtime/pkg/client" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" ) const ( @@ -42,7 +40,7 @@ const ( // ManagementCluster defines all behaviors necessary for something to function as a management cluster. type ManagementCluster interface { - ctrlclient.Reader + client.Reader GetMachinesForCluster(ctx context.Context, cluster *clusterv1.Cluster, filters ...collections.Func) (collections.Machines, error) GetMachinePoolsForCluster(ctx context.Context, cluster *clusterv1.Cluster) (*expv1.MachinePoolList, error) @@ -51,7 +49,7 @@ type ManagementCluster interface { // Management holds operations on the management cluster. type Management struct { - Client ctrlclient.Reader + Client client.Reader Tracker *remote.ClusterCacheTracker } @@ -64,13 +62,13 @@ type RemoteClusterConnectionError struct { func (e *RemoteClusterConnectionError) Error() string { return e.Name + ": " + e.Err.Error() } func (e *RemoteClusterConnectionError) Unwrap() error { return e.Err } -// Get implements ctrlclient.Reader. -func (m *Management) Get(ctx context.Context, key ctrlclient.ObjectKey, obj client.Object) error { +// Get implements client.Reader. +func (m *Management) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error { return m.Client.Get(ctx, key, obj) } -// List implements ctrlclient.Reader. -func (m *Management) List(ctx context.Context, list client.ObjectList, opts ...ctrlclient.ListOption) error { +// List implements client.Reader. +func (m *Management) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { return m.Client.List(ctx, list, opts...) 
} @@ -131,7 +129,7 @@ func (m *Management) GetWorkloadCluster(ctx context.Context, clusterKey client.O return nil, err } } else { - clientCert, err = m.getApiServerEtcdClientCert(ctx, clusterKey) + clientCert, err = m.getAPIServerEtcdClientCert(ctx, clusterKey) if err != nil { return nil, err } @@ -152,9 +150,9 @@ func (m *Management) GetWorkloadCluster(ctx context.Context, clusterKey client.O }, nil } -func (m *Management) getEtcdCAKeyPair(ctx context.Context, clusterKey ctrlclient.ObjectKey) ([]byte, []byte, error) { +func (m *Management) getEtcdCAKeyPair(ctx context.Context, clusterKey client.ObjectKey) ([]byte, []byte, error) { etcdCASecret := &corev1.Secret{} - etcdCAObjectKey := ctrlclient.ObjectKey{ + etcdCAObjectKey := client.ObjectKey{ Namespace: clusterKey.Namespace, Name: fmt.Sprintf("%s-etcd", clusterKey.Name), } @@ -169,9 +167,9 @@ func (m *Management) getEtcdCAKeyPair(ctx context.Context, clusterKey ctrlclient return crtData, keyData, nil } -func (m *Management) getApiServerEtcdClientCert(ctx context.Context, clusterKey ctrlclient.ObjectKey) (tls.Certificate, error) { +func (m *Management) getAPIServerEtcdClientCert(ctx context.Context, clusterKey client.ObjectKey) (tls.Certificate, error) { apiServerEtcdClientCertificateSecret := &corev1.Secret{} - apiServerEtcdClientCertificateObjectKey := ctrlclient.ObjectKey{ + apiServerEtcdClientCertificateObjectKey := client.ObjectKey{ Namespace: clusterKey.Namespace, Name: fmt.Sprintf("%s-apiserver-etcd-client", clusterKey.Name), } diff --git a/controlplane/kubeadm/internal/cluster_test.go b/controlplane/kubeadm/internal/cluster_test.go index 29ae72ff4da8..2c271957292a 100644 --- a/controlplane/kubeadm/internal/cluster_test.go +++ b/controlplane/kubeadm/internal/cluster_test.go @@ -24,10 +24,11 @@ import ( "crypto/x509/pkix" "fmt" "math/big" - "sigs.k8s.io/cluster-api/util/collections" "testing" "time" + "sigs.k8s.io/cluster-api/util/collections" + . 
"github.com/onsi/gomega" "sigs.k8s.io/controller-runtime/pkg/log" diff --git a/controlplane/kubeadm/internal/control_plane_test.go b/controlplane/kubeadm/internal/control_plane_test.go index 167f0d7f5609..62ed22456230 100644 --- a/controlplane/kubeadm/internal/control_plane_test.go +++ b/controlplane/kubeadm/internal/control_plane_test.go @@ -17,9 +17,10 @@ limitations under the License. package internal import ( - "sigs.k8s.io/cluster-api/util/collections" "testing" + "sigs.k8s.io/cluster-api/util/collections" + . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" diff --git a/controlplane/kubeadm/internal/etcd/fake/client.go b/controlplane/kubeadm/internal/etcd/fake/client.go index ee6d9efb7d6d..10f75f944b2c 100644 --- a/controlplane/kubeadm/internal/etcd/fake/client.go +++ b/controlplane/kubeadm/internal/etcd/fake/client.go @@ -23,7 +23,7 @@ import ( "go.etcd.io/etcd/clientv3" ) -type FakeEtcdClient struct { +type FakeEtcdClient struct { //nolint:golint AlarmResponse *clientv3.AlarmResponse EtcdEndpoints []string MemberListResponse *clientv3.MemberListResponse diff --git a/controlplane/kubeadm/internal/etcd_client_generator.go b/controlplane/kubeadm/internal/etcd_client_generator.go index db653d32747b..d3e90d8f9b7f 100644 --- a/controlplane/kubeadm/internal/etcd_client_generator.go +++ b/controlplane/kubeadm/internal/etcd_client_generator.go @@ -28,8 +28,8 @@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/proxy" ) -// etcdClientGenerator generates etcd clients that connect to specific etcd members on particular control plane nodes. -type etcdClientGenerator struct { +// EtcdClientGenerator generates etcd clients that connect to specific etcd members on particular control plane nodes. 
+type EtcdClientGenerator struct { restConfig *rest.Config tlsConfig *tls.Config createClient clientCreator @@ -38,8 +38,8 @@ type etcdClientGenerator struct { type clientCreator func(ctx context.Context, endpoints []string) (*etcd.Client, error) // NewEtcdClientGenerator returns a new etcdClientGenerator instance. -func NewEtcdClientGenerator(restConfig *rest.Config, tlsConfig *tls.Config) *etcdClientGenerator { - ecg := &etcdClientGenerator{restConfig: restConfig, tlsConfig: tlsConfig} +func NewEtcdClientGenerator(restConfig *rest.Config, tlsConfig *tls.Config) *EtcdClientGenerator { + ecg := &EtcdClientGenerator{restConfig: restConfig, tlsConfig: tlsConfig} ecg.createClient = func(ctx context.Context, endpoints []string) (*etcd.Client, error) { p := proxy.Proxy{ @@ -56,7 +56,7 @@ func NewEtcdClientGenerator(restConfig *rest.Config, tlsConfig *tls.Config) *etc } // forFirstAvailableNode takes a list of nodes and returns a client for the first one that connects. -func (c *etcdClientGenerator) forFirstAvailableNode(ctx context.Context, nodeNames []string) (*etcd.Client, error) { +func (c *EtcdClientGenerator) forFirstAvailableNode(ctx context.Context, nodeNames []string) (*etcd.Client, error) { var errs []error for _, name := range nodeNames { endpoints := []string{staticPodName("etcd", name)} @@ -71,7 +71,7 @@ func (c *etcdClientGenerator) forFirstAvailableNode(ctx context.Context, nodeNam } // forLeader takes a list of nodes and returns a client to the leader node. 
-func (c *etcdClientGenerator) forLeader(ctx context.Context, nodeNames []string) (*etcd.Client, error) { +func (c *EtcdClientGenerator) forLeader(ctx context.Context, nodeNames []string) (*etcd.Client, error) { var errs []error for _, nodeName := range nodeNames { diff --git a/controlplane/kubeadm/internal/etcd_client_generator_test.go b/controlplane/kubeadm/internal/etcd_client_generator_test.go index 6281fba44ddd..299e7551be58 100644 --- a/controlplane/kubeadm/internal/etcd_client_generator_test.go +++ b/controlplane/kubeadm/internal/etcd_client_generator_test.go @@ -35,7 +35,7 @@ import ( ) var ( - subject *etcdClientGenerator + subject *EtcdClientGenerator ) func TestNewEtcdClientGenerator(t *testing.T) { diff --git a/controlplane/kubeadm/internal/proxy/conn.go b/controlplane/kubeadm/internal/proxy/conn.go index 44e475f72ed1..226eb2ef20ce 100644 --- a/controlplane/kubeadm/internal/proxy/conn.go +++ b/controlplane/kubeadm/internal/proxy/conn.go @@ -47,12 +47,12 @@ func (c Conn) Write(b []byte) (n int, err error) { return c.stream.Write(b) } -// Return a fake address representing the proxied connection. +// LocalAddr returns a fake address representing the proxied connection. func (c Conn) LocalAddr() net.Addr { return NewAddrFromConn(c) } -// Return a fake address representing the proxied connection. +// RemoteAddr returns a fake address representing the proxied connection. 
func (c Conn) RemoteAddr() net.Addr { return NewAddrFromConn(c) } diff --git a/controlplane/kubeadm/internal/workload_cluster.go b/controlplane/kubeadm/internal/workload_cluster.go index 3a4cb63317e3..23765bb08d1d 100644 --- a/controlplane/kubeadm/internal/workload_cluster.go +++ b/controlplane/kubeadm/internal/workload_cluster.go @@ -61,7 +61,10 @@ const ( var ( minVerKubeletSystemdDriver = semver.MustParse("1.21.0") - ErrControlPlaneMinNodes = errors.New("cluster has fewer than 2 control plane nodes; removing an etcd member is not supported") + + // ErrControlPlaneMinNodes signals that a cluster doesn't meet the minimum required nodes + // to remove an etcd member. + ErrControlPlaneMinNodes = errors.New("cluster has fewer than 2 control plane nodes; removing an etcd member is not supported") ) // WorkloadCluster defines all behaviors necessary to upgrade kubernetes on a workload cluster diff --git a/controlplane/kubeadm/internal/workload_cluster_coredns_test.go b/controlplane/kubeadm/internal/workload_cluster_coredns_test.go index 8e6944ab9489..71c52dce6e93 100644 --- a/controlplane/kubeadm/internal/workload_cluster_coredns_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_coredns_test.go @@ -31,7 +31,6 @@ import ( bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha4" "sigs.k8s.io/controller-runtime/pkg/client" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) @@ -440,13 +439,13 @@ kind: ClusterConfiguration if tt.expectUpdates { // assert kubeadmConfigMap var expectedKubeadmConfigMap corev1.ConfigMap - g.Expect(testEnv.Get(ctx, ctrlclient.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, &expectedKubeadmConfigMap)).To(Succeed()) + g.Expect(testEnv.Get(ctx, client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, &expectedKubeadmConfigMap)).To(Succeed())
g.Expect(expectedKubeadmConfigMap.Data).To(HaveKeyWithValue("ClusterConfiguration", ContainSubstring(tt.kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag))) g.Expect(expectedKubeadmConfigMap.Data).To(HaveKeyWithValue("ClusterConfiguration", ContainSubstring(tt.kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageRepository))) // assert CoreDNS corefile var expectedConfigMap corev1.ConfigMap - g.Expect(testEnv.Get(ctx, ctrlclient.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &expectedConfigMap)).To(Succeed()) + g.Expect(testEnv.Get(ctx, client.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &expectedConfigMap)).To(Succeed()) g.Expect(expectedConfigMap.Data).To(HaveLen(2)) g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("Corefile", "updated-core-file")) g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("Corefile-backup", expectedCorefile)) @@ -454,7 +453,7 @@ kind: ClusterConfiguration // assert CoreDNS deployment var actualDeployment appsv1.Deployment g.Eventually(func() string { - g.Expect(testEnv.Get(ctx, ctrlclient.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &actualDeployment)).To(Succeed()) + g.Expect(testEnv.Get(ctx, client.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &actualDeployment)).To(Succeed()) return actualDeployment.Spec.Template.Spec.Containers[0].Image }, "5s").Should(Equal(tt.expectImage)) } @@ -605,7 +604,7 @@ func TestUpdateCoreDNSCorefile(t *testing.T) { g.Expect(fakeMigrator.migrateCalled).To(BeTrue()) var expectedConfigMap corev1.ConfigMap - g.Expect(fakeClient.Get(ctx, ctrlclient.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &expectedConfigMap)).To(Succeed()) + g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &expectedConfigMap)).To(Succeed()) g.Expect(expectedConfigMap.Data).To(HaveLen(1)) g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("Corefile", originalCorefile)) }) @@ 
-636,7 +635,7 @@ func TestUpdateCoreDNSCorefile(t *testing.T) { g.Expect(err).To(HaveOccurred()) var expectedConfigMap corev1.ConfigMap - g.Expect(fakeClient.Get(ctx, ctrlclient.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &expectedConfigMap)).To(Succeed()) + g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &expectedConfigMap)).To(Succeed()) g.Expect(expectedConfigMap.Data).To(HaveLen(2)) g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("Corefile", originalCorefile)) g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("Corefile-backup", originalCorefile)) @@ -683,11 +682,11 @@ func TestUpdateCoreDNSCorefile(t *testing.T) { } var actualDeployment appsv1.Deployment - g.Expect(fakeClient.Get(ctx, ctrlclient.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &actualDeployment)).To(Succeed()) + g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &actualDeployment)).To(Succeed()) g.Expect(actualDeployment.Spec.Template.Spec.Volumes).To(ConsistOf(expectedVolume)) var expectedConfigMap corev1.ConfigMap - g.Expect(fakeClient.Get(ctx, ctrlclient.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &expectedConfigMap)).To(Succeed()) + g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &expectedConfigMap)).To(Succeed()) g.Expect(expectedConfigMap.Data).To(HaveLen(2)) g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("Corefile", "updated-core-file")) g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("Corefile-backup", originalCorefile)) @@ -1061,7 +1060,7 @@ func TestUpdateCoreDNSDeployment(t *testing.T) { } var actualDeployment appsv1.Deployment - g.Expect(fakeClient.Get(ctx, ctrlclient.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &actualDeployment)).To(Succeed()) + g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: coreDNSKey, Namespace: 
metav1.NamespaceSystem}, &actualDeployment)).To(Succeed()) // ensure the image is updated and the volumes point to the corefile g.Expect(actualDeployment.Spec.Template.Spec.Containers[0].Image).To(Equal(tt.info.ToImage)) g.Expect(actualDeployment.Spec.Template.Spec.Volumes).To(ConsistOf(expectedVolume)) diff --git a/controlplane/kubeadm/internal/workload_cluster_etcd.go b/controlplane/kubeadm/internal/workload_cluster_etcd.go index 664d45db4a3d..c538b66e5ebf 100644 --- a/controlplane/kubeadm/internal/workload_cluster_etcd.go +++ b/controlplane/kubeadm/internal/workload_cluster_etcd.go @@ -196,7 +196,8 @@ type EtcdMemberStatus struct { Responsive bool } -// EtcdStatus returns the current status of the etcd cluster +// EtcdMembers returns the current set of members in an etcd cluster. +// // NOTE: This methods uses control plane machines/nodes only to get in contact with etcd, // but then it relies on etcd as ultimate source of truth for the list of members. // This is intended to allow informed decisions on actions impacting etcd quorum. diff --git a/controlplane/kubeadm/internal/workload_cluster_rbac.go b/controlplane/kubeadm/internal/workload_cluster_rbac.go index 8d96ffd1430b..0b3d667cabd6 100644 --- a/controlplane/kubeadm/internal/workload_cluster_rbac.go +++ b/controlplane/kubeadm/internal/workload_cluster_rbac.go @@ -22,12 +22,10 @@ import ( "github.com/blang/semver" "github.com/pkg/errors" - rbac "k8s.io/api/rbac/v1" rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" ) const ( @@ -50,7 +48,7 @@ const ( // EnsureResource creates a resoutce if the target resource doesn't exist. If the resource exists already, this function will ignore the resource instead. 
func (w *Workload) EnsureResource(ctx context.Context, obj client.Object) error { testObj := obj.DeepCopyObject().(client.Object) - key := ctrlclient.ObjectKeyFromObject(obj) + key := client.ObjectKeyFromObject(obj) if err := w.Client.Get(ctx, key, testObj); err != nil && !apierrors.IsNotFound(err) { return errors.Wrapf(err, "failed to determine if resource %s/%s already exists", key.Namespace, key.Name) } else if err == nil { @@ -67,12 +65,12 @@ func (w *Workload) EnsureResource(ctx context.Context, obj client.Object) error // AllowBootstrapTokensToGetNodes creates RBAC rules to allow Node Bootstrap Tokens to list nodes. func (w *Workload) AllowBootstrapTokensToGetNodes(ctx context.Context) error { - if err := w.EnsureResource(ctx, &rbac.ClusterRole{ + if err := w.EnsureResource(ctx, &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{ Name: GetNodesClusterRoleName, Namespace: metav1.NamespaceSystem, }, - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ { Verbs: []string{"get"}, APIGroups: []string{""}, @@ -83,19 +81,19 @@ func (w *Workload) AllowBootstrapTokensToGetNodes(ctx context.Context) error { return err } - return w.EnsureResource(ctx, &rbac.ClusterRoleBinding{ + return w.EnsureResource(ctx, &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: GetNodesClusterRoleName, Namespace: metav1.NamespaceSystem, }, - RoleRef: rbac.RoleRef{ - APIGroup: rbac.GroupName, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: GetNodesClusterRoleName, }, - Subjects: []rbac.Subject{ + Subjects: []rbacv1.Subject{ { - Kind: rbac.GroupKind, + Kind: rbacv1.GroupKind, Name: NodeBootstrapTokenAuthGroup, }, }, @@ -114,25 +112,25 @@ func generateKubeletConfigRoleName(version semver.Version) string { // If the role binding already exists this function is a no-op. 
func (w *Workload) ReconcileKubeletRBACBinding(ctx context.Context, version semver.Version) error { roleName := generateKubeletConfigRoleName(version) - return w.EnsureResource(ctx, &rbac.RoleBinding{ + return w.EnsureResource(ctx, &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceSystem, Name: roleName, }, Subjects: []rbacv1.Subject{ { - APIGroup: rbac.GroupName, - Kind: rbac.GroupKind, + APIGroup: rbacv1.GroupName, + Kind: rbacv1.GroupKind, Name: NodesGroup, }, { - APIGroup: rbac.GroupName, - Kind: rbac.GroupKind, + APIGroup: rbacv1.GroupName, + Kind: rbacv1.GroupKind, Name: NodeBootstrapTokenAuthGroup, }, }, RoleRef: rbacv1.RoleRef{ - APIGroup: rbac.GroupName, + APIGroup: rbacv1.GroupName, Kind: "Role", Name: roleName, }, diff --git a/errors/clusters.go b/errors/clusters.go index 5a9fd6b61e0a..24d08a62b995 100644 --- a/errors/clusters.go +++ b/errors/clusters.go @@ -20,7 +20,7 @@ import ( "fmt" ) -// A more descriptive kind of error that represents an error condition that +// ClusterError is a more descriptive kind of error that represents an error condition that // should be set in the Cluster.Status. The "Reason" field is meant for short, // enum-style constants meant to be interpreted by clusters. The "Message" // field is meant to be read by humans. diff --git a/errors/consts.go b/errors/consts.go index e5bfaa8f6931..81dff5d31671 100644 --- a/errors/consts.go +++ b/errors/consts.go @@ -16,19 +16,21 @@ limitations under the License. package errors +type MachineStatusError string + // Constants aren't automatically generated for unversioned packages. // Instead share the same constant for all versioned packages. -type MachineStatusError string const ( - // Represents that the combination of configuration in the MachineSpec - // is not supported by this cluster. 
This is not a transient error, but + // InvalidConfigurationMachineError represents that the combination + // of configuration in the MachineSpec is not supported by this cluster. + // This is not a transient error, but // indicates a state that must be fixed before progress can be made. // // Example: the ProviderSpec specifies an instance type that doesn't exist,. InvalidConfigurationMachineError MachineStatusError = "InvalidConfiguration" - // This indicates that the MachineSpec has been updated in a way that + // UnsupportedChangeMachineError indicates that the MachineSpec has been updated in a way that // is not supported for reconciliation on this cluster. The spec may be // completely valid from a configuration standpoint, but the controller // does not support changing the real world state to match the new @@ -38,11 +40,11 @@ const ( // container runtime from docker to rkt. UnsupportedChangeMachineError MachineStatusError = "UnsupportedChange" - // This generally refers to exceeding one's quota in a cloud provider, + // InsufficientResourcesMachineError generally refers to exceeding one's quota in a cloud provider, // or running out of physical machines in an on-premise environment. InsufficientResourcesMachineError MachineStatusError = "InsufficientResources" - // There was an error while trying to create a Node to match this + // CreateMachineError indicates an error while trying to create a Node to match this // Machine. This may indicate a transient problem that will be fixed // automatically with time, such as a service outage, or a terminal // error during creation that doesn't match a more specific @@ -51,14 +53,14 @@ const ( // Example: timeout trying to connect to GCE. CreateMachineError MachineStatusError = "CreateError" - // There was an error while trying to update a Node that this + // UpdateMachineError indicates an error while trying to update a Node that this // Machine represents. 
This may indicate a transient problem that will be // fixed automatically with time, such as a service outage, // // Example: error updating load balancers. UpdateMachineError MachineStatusError = "UpdateError" - // An error was encountered while trying to delete the Node that this + // DeleteMachineError indicates an error was encountered while trying to delete the Node that this // Machine represents. This could be a transient or terminal error, but // will only be observable if the provider's Machine controller has // added a finalizer to the object to more gracefully handle deletions. @@ -66,7 +68,7 @@ const ( // Example: cannot resolve EC2 IP address. DeleteMachineError MachineStatusError = "DeleteError" - // This error indicates that the machine did not join the cluster + // JoinClusterTimeoutMachineError indicates that the machine did not join the cluster // as a new node within the expected timeframe after instance // creation at the provider succeeded // @@ -104,7 +106,8 @@ const ( type MachineSetStatusError string const ( - // Represents that the combination of configuration in the MachineTemplateSpec + // InvalidConfigurationMachineSetError represents + // the combination of configuration in the MachineTemplateSpec // is not supported by this cluster. This is not a transient error, but // indicates a state that must be fixed before progress can be made. // @@ -115,7 +118,8 @@ const ( type MachinePoolStatusFailure string const ( - // Represents that the combination of configuration in the MachineTemplateSpec + // InvalidConfigurationMachinePoolError represemts + // the combination of configuration in the MachineTemplateSpec // is not supported by this cluster. This is not a transient error, but // indicates a state that must be fixed before progress can be made. 
// diff --git a/errors/deployer.go b/errors/deployer.go deleted file mode 100644 index 45fb789ada15..000000000000 --- a/errors/deployer.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package errors - -import "github.com/pkg/errors" - -var ErrNotImplemented = errors.New("not implemented") diff --git a/errors/machines.go b/errors/machines.go index 8a499050bee1..cb2defc8065e 100644 --- a/errors/machines.go +++ b/errors/machines.go @@ -20,7 +20,7 @@ import ( "fmt" ) -// A more descriptive kind of error that represents an error condition that +// MachineError is a more descriptive kind of error that represents an error condition that // should be set in the Machine.Status. The "Reason" field is meant for short, // enum-style constants meant to be interpreted by machines. The "Message" // field is meant to be read by humans. diff --git a/exp/addons/api/v1alpha3/clusterresourceset_types.go b/exp/addons/api/v1alpha3/clusterresourceset_types.go index e888a0ab978f..b39a86dc0c40 100644 --- a/exp/addons/api/v1alpha3/clusterresourceset_types.go +++ b/exp/addons/api/v1alpha3/clusterresourceset_types.go @@ -53,6 +53,7 @@ type ClusterResourceSetSpec struct { // ClusterResourceSetResourceKind is a string representation of a ClusterResourceSet resource kind. type ClusterResourceSetResourceKind string +// Define the ClusterResourceSetResourceKind constants. 
const ( SecretClusterResourceSetResourceKind ClusterResourceSetResourceKind = "Secret" ConfigMapClusterResourceSetResourceKind ClusterResourceSetResourceKind = "ConfigMap" diff --git a/exp/addons/api/v1alpha3/condition_consts.go b/exp/addons/api/v1alpha3/condition_consts.go index 03ee3041632f..e61fba5ed698 100644 --- a/exp/addons/api/v1alpha3/condition_consts.go +++ b/exp/addons/api/v1alpha3/condition_consts.go @@ -37,6 +37,6 @@ const ( // RetrievingResourceFailedReason (Severity=Warning) documents at least one of the resources are not successfully retrieved. RetrievingResourceFailedReason = "RetrievingResourceFailed" - // WrongSecretType (Severity=Warning) documents at least one of the Secret's type in the resource list is not supported. + // WrongSecretTypeReason (Severity=Warning) documents at least one of the Secret's type in the resource list is not supported. WrongSecretTypeReason = "WrongSecretType" ) diff --git a/exp/addons/api/v1alpha4/clusterresourceset_types.go b/exp/addons/api/v1alpha4/clusterresourceset_types.go index e2e1daddaab3..e5b9e883f901 100644 --- a/exp/addons/api/v1alpha4/clusterresourceset_types.go +++ b/exp/addons/api/v1alpha4/clusterresourceset_types.go @@ -53,6 +53,7 @@ type ClusterResourceSetSpec struct { // ClusterResourceSetResourceKind is a string representation of a ClusterResourceSet resource kind. type ClusterResourceSetResourceKind string +// Define the ClusterResourceSetResourceKind constants. 
const ( SecretClusterResourceSetResourceKind ClusterResourceSetResourceKind = "Secret" ConfigMapClusterResourceSetResourceKind ClusterResourceSetResourceKind = "ConfigMap" diff --git a/exp/addons/api/v1alpha4/condition_consts.go b/exp/addons/api/v1alpha4/condition_consts.go index 97699a508019..5bfcc3910bc7 100644 --- a/exp/addons/api/v1alpha4/condition_consts.go +++ b/exp/addons/api/v1alpha4/condition_consts.go @@ -37,6 +37,6 @@ const ( // RetrievingResourceFailedReason (Severity=Warning) documents at least one of the resources are not successfully retrieved. RetrievingResourceFailedReason = "RetrievingResourceFailed" - // WrongSecretType (Severity=Warning) documents at least one of the Secret's type in the resource list is not supported. + // WrongSecretTypeReason (Severity=Warning) documents at least one of the Secret's type in the resource list is not supported. WrongSecretTypeReason = "WrongSecretType" ) diff --git a/exp/addons/controllers/clusterresourceset_controller.go b/exp/addons/controllers/clusterresourceset_controller.go index 4dc52aa780be..0dcd96bdfdec 100644 --- a/exp/addons/controllers/clusterresourceset_controller.go +++ b/exp/addons/controllers/clusterresourceset_controller.go @@ -50,6 +50,7 @@ import ( ) var ( + // ErrSecretTypeNotSupported signals that a Secret is not supported. 
ErrSecretTypeNotSupported = errors.New("unsupported secret type") ) diff --git a/exp/addons/controllers/clusterresourceset_controller_test.go b/exp/addons/controllers/clusterresourceset_controller_test.go index 25c9b17c1ced..b75753eed950 100644 --- a/exp/addons/controllers/clusterresourceset_controller_test.go +++ b/exp/addons/controllers/clusterresourceset_controller_test.go @@ -86,8 +86,8 @@ metadata: }, } t.Log("Creating a Secret and a ConfigMap with ConfigMap in their data field") - testEnv.Create(ctx, testConfigmap) - testEnv.Create(ctx, testSecret) + g.Expect(testEnv.Create(ctx, testConfigmap)).To(Succeed()) + g.Expect(testEnv.Create(ctx, testSecret)).To(Succeed()) } teardown := func(t *testing.T, g *WithT) { @@ -121,6 +121,15 @@ metadata: err := testEnv.Get(ctx, crsKey, crs) return err != nil }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{ + Name: configmapName, + Namespace: defaultNamespaceName, + }})).To(Succeed()) + g.Expect(testEnv.Delete(ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: defaultNamespaceName, + }})).To(Succeed()) } t.Run("Should reconcile a ClusterResourceSet with multiple resources when a cluster with matching label exists", func(t *testing.T) { diff --git a/exp/controllers/machinepool_controller_noderef.go b/exp/controllers/machinepool_controller_noderef.go index 2ac312ff5a35..53c61afaef29 100644 --- a/exp/controllers/machinepool_controller_noderef.go +++ b/exp/controllers/machinepool_controller_noderef.go @@ -21,29 +21,26 @@ import ( "fmt" "time" - "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/patch" - - ctrl "sigs.k8s.io/controller-runtime" - "github.com/pkg/errors" - apicorev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/controllers/noderefutil" "sigs.k8s.io/cluster-api/controllers/remote" expv1 
"sigs.k8s.io/cluster-api/exp/api/v1alpha4" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/patch" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ) var ( - ErrNoAvailableNodes = errors.New("cannot find nodes with matching ProviderIDs in ProviderIDList") + errNoAvailableNodes = errors.New("cannot find nodes with matching ProviderIDs in ProviderIDList") ) type getNodeReferencesResult struct { - references []apicorev1.ObjectReference + references []corev1.ObjectReference available int ready int } @@ -87,11 +84,11 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster * // Get the Node references. nodeRefsResult, err := r.getNodeReferences(ctx, clusterClient, mp.Spec.ProviderIDList) if err != nil { - if err == ErrNoAvailableNodes { + if err == errNoAvailableNodes { log.Info("Cannot assign NodeRefs to MachinePool, no matching Nodes") return ctrl.Result{RequeueAfter: 10 * time.Second}, nil } - r.recorder.Event(mp, apicorev1.EventTypeWarning, "FailedSetNodeRef", err.Error()) + r.recorder.Event(mp, corev1.EventTypeWarning, "FailedSetNodeRef", err.Error()) return ctrl.Result{}, errors.Wrapf(err, "failed to get node references") } @@ -101,7 +98,7 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster * mp.Status.NodeRefs = nodeRefsResult.references log.Info("Set MachinePools's NodeRefs", "noderefs", mp.Status.NodeRefs) - r.recorder.Event(mp, apicorev1.EventTypeNormal, "SuccessfulSetNodeRefs", fmt.Sprintf("%+v", mp.Status.NodeRefs)) + r.recorder.Event(mp, corev1.EventTypeNormal, "SuccessfulSetNodeRefs", fmt.Sprintf("%+v", mp.Status.NodeRefs)) // Reconcile node annotations. 
for _, nodeRef := range nodeRefsResult.references { @@ -142,9 +139,9 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster * // deleteRetiredNodes deletes nodes that don't have a corresponding ProviderID in Spec.ProviderIDList. // A MachinePool infrastructure provider indicates an instance in the set has been deleted by // removing its ProviderID from the slice. -func (r *MachinePoolReconciler) deleteRetiredNodes(ctx context.Context, c client.Client, nodeRefs []apicorev1.ObjectReference, providerIDList []string) error { +func (r *MachinePoolReconciler) deleteRetiredNodes(ctx context.Context, c client.Client, nodeRefs []corev1.ObjectReference, providerIDList []string) error { log := ctrl.LoggerFrom(ctx, "providerIDList", len(providerIDList)) - nodeRefsMap := make(map[string]*apicorev1.Node, len(nodeRefs)) + nodeRefsMap := make(map[string]*corev1.Node, len(nodeRefs)) for _, nodeRef := range nodeRefs { node := &corev1.Node{} if err := c.Get(ctx, client.ObjectKey{Name: nodeRef.Name}, node); err != nil { @@ -180,8 +177,8 @@ func (r *MachinePoolReconciler) getNodeReferences(ctx context.Context, c client. log := ctrl.LoggerFrom(ctx, "providerIDList", len(providerIDList)) var ready, available int - nodeRefsMap := make(map[string]apicorev1.Node) - nodeList := apicorev1.NodeList{} + nodeRefsMap := make(map[string]corev1.Node) + nodeList := corev1.NodeList{} for { if err := c.List(ctx, &nodeList, client.Continue(nodeList.Continue)); err != nil { return getNodeReferencesResult{}, errors.Wrapf(err, "failed to List nodes") @@ -202,7 +199,7 @@ func (r *MachinePoolReconciler) getNodeReferences(ctx context.Context, c client. } } - var nodeRefs []apicorev1.ObjectReference + var nodeRefs []corev1.ObjectReference for _, providerID := range providerIDList { pid, err := noderefutil.NewProviderID(providerID) if err != nil { @@ -214,7 +211,7 @@ func (r *MachinePoolReconciler) getNodeReferences(ctx context.Context, c client. 
if nodeIsReady(&node) { ready++ } - nodeRefs = append(nodeRefs, apicorev1.ObjectReference{ + nodeRefs = append(nodeRefs, corev1.ObjectReference{ Kind: node.Kind, APIVersion: node.APIVersion, Name: node.Name, @@ -224,15 +221,15 @@ func (r *MachinePoolReconciler) getNodeReferences(ctx context.Context, c client. } if len(nodeRefs) == 0 { - return getNodeReferencesResult{}, ErrNoAvailableNodes + return getNodeReferencesResult{}, errNoAvailableNodes } return getNodeReferencesResult{nodeRefs, available, ready}, nil } -func nodeIsReady(node *apicorev1.Node) bool { +func nodeIsReady(node *corev1.Node) bool { for _, n := range node.Status.Conditions { - if n.Type == apicorev1.NodeReady { - return n.Status == apicorev1.ConditionTrue + if n.Type == corev1.NodeReady { + return n.Status == corev1.ConditionTrue } } return false diff --git a/exp/controllers/machinepool_controller_noderef_test.go b/exp/controllers/machinepool_controller_noderef_test.go index 2974e946e6fc..354043701ad5 100644 --- a/exp/controllers/machinepool_controller_noderef_test.go +++ b/exp/controllers/machinepool_controller_noderef_test.go @@ -134,7 +134,7 @@ func TestMachinePoolGetNodeReference(t *testing.T) { name: "valid provider id, no node found", providerIDList: []string{"aws:///id-node-100"}, expected: nil, - err: ErrNoAvailableNodes, + err: errNoAvailableNodes, }, } diff --git a/feature/feature.go b/feature/feature.go index e9b288a7062b..bed63a6c7a15 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -29,9 +29,13 @@ const ( // // alpha: v1.X // MyFeature featuregate.Feature = "MyFeature". + // MachinePool is a feature gate for MachinePool functionality. + // // alpha: v0.3 MachinePool featuregate.Feature = "MachinePool" + // ClusterResourceSet is a feature gate for the ClusterResourceSet functionality. 
+ // // alpha: v0.3 ClusterResourceSet featuregate.Feature = "ClusterResourceSet" ) diff --git a/test/e2e/clusterctl_upgrade.go b/test/e2e/clusterctl_upgrade.go index db10d08e250f..0351441a1a11 100644 --- a/test/e2e/clusterctl_upgrade.go +++ b/test/e2e/clusterctl_upgrade.go @@ -290,7 +290,7 @@ func downloadToTmpFile(url string) string { defer tmpFile.Close() // Get the data - resp, err := http.Get(url) + resp, err := http.Get(url) //nolint:gosec Expect(err).ToNot(HaveOccurred(), "failed to get clusterctl") defer resp.Body.Close() diff --git a/test/e2e/kcp_adoption.go b/test/e2e/kcp_adoption.go index 73fc88facbe0..f9c5de708566 100644 --- a/test/e2e/kcp_adoption.go +++ b/test/e2e/kcp_adoption.go @@ -40,7 +40,7 @@ import ( ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" ) -// KCPUpgradeSpecInput is the input for KCPUpgradeSpec. +// KCPAdoptionSpecInput is the input for KCPAdoptionSpec. type KCPAdoptionSpecInput struct { E2EConfig *clusterctl.E2EConfig ClusterctlConfigPath string diff --git a/test/e2e/node_drain_timeout.go b/test/e2e/node_drain_timeout.go index c327fcccac4a..e3441dfdb192 100644 --- a/test/e2e/node_drain_timeout.go +++ b/test/e2e/node_drain_timeout.go @@ -37,7 +37,7 @@ import ( "sigs.k8s.io/cluster-api/util" ) -// NodeDrainTimeoutInput is the input for NodeDrainTimeoutSpec. +// NodeDrainTimeoutSpecInput is the input for NodeDrainTimeoutSpec. type NodeDrainTimeoutSpecInput struct { E2EConfig *clusterctl.E2EConfig ClusterctlConfigPath string diff --git a/test/framework/bootstrap/kind_provider.go b/test/framework/bootstrap/kind_provider.go index 7830292322cb..4e900e02d9c6 100644 --- a/test/framework/bootstrap/kind_provider.go +++ b/test/framework/bootstrap/kind_provider.go @@ -35,18 +35,18 @@ const ( // KindClusterOption is a NewKindClusterProvider option. 
type KindClusterOption interface { - apply(*kindClusterProvider) + apply(*KindClusterProvider) } -type kindClusterOptionAdapter func(*kindClusterProvider) +type kindClusterOptionAdapter func(*KindClusterProvider) -func (adapter kindClusterOptionAdapter) apply(kindClusterProvider *kindClusterProvider) { +func (adapter kindClusterOptionAdapter) apply(kindClusterProvider *KindClusterProvider) { adapter(kindClusterProvider) } // WithNodeImage implements a New Option that instruct the kindClusterProvider to use a specific node image / Kubernetes version. func WithNodeImage(image string) KindClusterOption { - return kindClusterOptionAdapter(func(k *kindClusterProvider) { + return kindClusterOptionAdapter(func(k *KindClusterProvider) { k.nodeImage = image }) } @@ -54,7 +54,7 @@ func WithNodeImage(image string) KindClusterOption { // WithDockerSockMount implements a New Option that instruct the kindClusterProvider to mount /var/run/docker.sock into // the new kind cluster. func WithDockerSockMount() KindClusterOption { - return kindClusterOptionAdapter(func(k *kindClusterProvider) { + return kindClusterOptionAdapter(func(k *KindClusterProvider) { k.withDockerSock = true }) } @@ -62,16 +62,16 @@ func WithDockerSockMount() KindClusterOption { // WithIPv6Family implements a New Option that instruct the kindClusterProvider to set the IPFamily to IPv6 in // the new kind cluster. func WithIPv6Family() KindClusterOption { - return kindClusterOptionAdapter(func(k *kindClusterProvider) { + return kindClusterOptionAdapter(func(k *KindClusterProvider) { k.ipFamily = clusterv1.IPv6IPFamily }) } // NewKindClusterProvider returns a ClusterProvider that can create a kind cluster. 
-func NewKindClusterProvider(name string, options ...KindClusterOption) *kindClusterProvider { +func NewKindClusterProvider(name string, options ...KindClusterOption) *KindClusterProvider { Expect(name).ToNot(BeEmpty(), "name is required for NewKindClusterProvider") - clusterProvider := &kindClusterProvider{ + clusterProvider := &KindClusterProvider{ name: name, } for _, option := range options { @@ -80,8 +80,8 @@ func NewKindClusterProvider(name string, options ...KindClusterOption) *kindClus return clusterProvider } -// kindClusterProvider implements a ClusterProvider that can create a kind cluster. -type kindClusterProvider struct { +// KindClusterProvider implements a ClusterProvider that can create a kind cluster. +type KindClusterProvider struct { name string withDockerSock bool kubeconfigPath string @@ -90,7 +90,7 @@ type kindClusterProvider struct { } // Create a Kubernetes cluster using kind. -func (k *kindClusterProvider) Create(ctx context.Context) { +func (k *KindClusterProvider) Create(ctx context.Context) { Expect(ctx).NotTo(BeNil(), "ctx is required for Create") // Sets the kubeconfig path to a temp file. @@ -106,7 +106,7 @@ func (k *kindClusterProvider) Create(ctx context.Context) { // createKindCluster calls the kind library taking care of passing options for: // - use a dedicated kubeconfig file (test should not alter the user environment) // - if required, mount /var/run/docker.sock. -func (k *kindClusterProvider) createKindCluster() { +func (k *KindClusterProvider) createKindCluster() { kindCreateOptions := []kind.CreateOption{ kind.CreateWithKubeconfigPath(k.kubeconfigPath), } @@ -155,12 +155,12 @@ func setDockerSockConfig(cfg *kindv1.Cluster) { } // GetKubeconfigPath returns the path to the kubeconfig file for the cluster. -func (k *kindClusterProvider) GetKubeconfigPath() string { +func (k *KindClusterProvider) GetKubeconfigPath() string { return k.kubeconfigPath } // Dispose the kind cluster and its kubeconfig file. 
-func (k *kindClusterProvider) Dispose(ctx context.Context) { +func (k *KindClusterProvider) Dispose(ctx context.Context) { Expect(ctx).NotTo(BeNil(), "ctx is required for Dispose") if err := kind.NewProvider().Delete(k.name, k.kubeconfigPath); err != nil { diff --git a/test/framework/cluster_proxy.go b/test/framework/cluster_proxy.go index ab058284915c..bb152e8bebbe 100644 --- a/test/framework/cluster_proxy.go +++ b/test/framework/cluster_proxy.go @@ -24,11 +24,9 @@ import ( "os" "path" goruntime "runtime" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4" "strings" . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes" @@ -36,6 +34,7 @@ import ( "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd/api" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4" "sigs.k8s.io/cluster-api/test/framework/exec" "sigs.k8s.io/cluster-api/test/framework/internal/log" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/test/framework/clusterctl/client.go b/test/framework/clusterctl/client.go index cca204cd9eb8..a7544bbc66c8 100644 --- a/test/framework/clusterctl/client.go +++ b/test/framework/clusterctl/client.go @@ -190,8 +190,7 @@ func ConfigCluster(ctx context.Context, input ConfigClusterInput) []byte { yaml, err := template.Yaml() Expect(err).ToNot(HaveOccurred(), "Failed to generate yaml for the workload cluster template") - log.WriteString(string(yaml)) - + _, _ = log.WriteString(string(yaml)) return yaml } diff --git a/test/framework/clusterctl/e2e_config.go b/test/framework/clusterctl/e2e_config.go index 161fdc7f25dd..23b1c025da89 100644 --- a/test/framework/clusterctl/e2e_config.go +++ b/test/framework/clusterctl/e2e_config.go @@ -496,7 +496,7 @@ func fileExists(filename string) bool { return !info.IsDir() } -// InfraProvider returns the infrastructure provider selected for running this E2E test. 
+// InfrastructureProviders returns the infrastructure provider selected for running this E2E test. func (c *E2EConfig) InfrastructureProviders() []string { InfraProviders := []string{} for _, provider := range c.Providers { diff --git a/test/framework/clusterctl/logger/logger.go b/test/framework/clusterctl/logger/logger.go index c957b1b59561..1160ed773185 100644 --- a/test/framework/clusterctl/logger/logger.go +++ b/test/framework/clusterctl/logger/logger.go @@ -54,7 +54,7 @@ func (l *logger) Error(err error, msg string, kvs ...interface{}) { panic("using log.Error is deprecated in clusterctl") } -func (l *logger) V(level int) logr.InfoLogger { +func (l *logger) V(level int) logr.Logger { nl := l.clone() return nl } diff --git a/test/framework/clusterctl/repository.go b/test/framework/clusterctl/repository.go index 36a4383249f2..35f0b8034bb3 100644 --- a/test/framework/clusterctl/repository.go +++ b/test/framework/clusterctl/repository.go @@ -42,7 +42,8 @@ const ( httpsURIScheme = "https" ) -// Provides helpers for managing a clusterctl local repository to be used for running e2e tests in isolation. +// RepositoryFileTransformation is a helpers for managing a clusterctl +// local repository to be used for running e2e tests in isolation. type RepositoryFileTransformation func([]byte) ([]byte, error) // CreateRepositoryInput is the input for CreateRepository. diff --git a/test/framework/controlplane_helpers.go b/test/framework/controlplane_helpers.go index a3d79d402146..095d90901f49 100644 --- a/test/framework/controlplane_helpers.go +++ b/test/framework/controlplane_helpers.go @@ -108,7 +108,7 @@ func WaitForKubeadmControlPlaneMachinesToExist(ctx context.Context, input WaitFo }, intervals...).Should(Equal(int(*input.ControlPlane.Spec.Replicas))) } -// WaitForOneKubeadmControlPlaneMachinesToExistInput is the input for WaitForKubeadmControlPlaneMachinesToExist. 
+// WaitForOneKubeadmControlPlaneMachineToExistInput is the input for WaitForOneKubeadmControlPlaneMachineToExist.
wait := NewCommand( diff --git a/test/framework/kubetest/run.go b/test/framework/kubetest/run.go index 9b49aa386228..3e096e27168c 100644 --- a/test/framework/kubetest/run.go +++ b/test/framework/kubetest/run.go @@ -39,6 +39,7 @@ const ( ciArtifactImage = "gcr.io/k8s-staging-ci-images/conformance" ) +// Export Ginkgo constants. const ( DefaultGinkgoNodes = 1 DefaultGinkoSlowSpecThreshold = 120 diff --git a/test/framework/machinehealthcheck_helpers.go b/test/framework/machinehealthcheck_helpers.go index 0e8922e39643..3b168711e714 100644 --- a/test/framework/machinehealthcheck_helpers.go +++ b/test/framework/machinehealthcheck_helpers.go @@ -38,7 +38,7 @@ type DiscoverMachineHealthCheckAndWaitForRemediationInput struct { WaitForMachineRemediation []interface{} } -// DiscoverMachineHealthCheckAndWaitForRemediation patches an unhealthy node condition to one node observed by the Machine Health Check and then wait for remediation. +// DiscoverMachineHealthChecksAndWaitForRemediation patches an unhealthy node condition to one node observed by the Machine Health Check and then wait for remediation. func DiscoverMachineHealthChecksAndWaitForRemediation(ctx context.Context, input DiscoverMachineHealthCheckAndWaitForRemediationInput) { Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoverMachineHealthChecksAndWaitForRemediation") Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. 
input.ClusterProxy can't be nil when calling DiscoverMachineHealthChecksAndWaitForRemediation") diff --git a/test/framework/machinepool_helpers.go b/test/framework/machinepool_helpers.go index 0df4fe7d2446..6cc22851ea74 100644 --- a/test/framework/machinepool_helpers.go +++ b/test/framework/machinepool_helpers.go @@ -18,6 +18,7 @@ package framework import ( "context" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/cluster-api/test/framework/internal/log" diff --git a/test/framework/machines.go b/test/framework/machines.go index 102d88bbad4b..8478f0b17424 100644 --- a/test/framework/machines.go +++ b/test/framework/machines.go @@ -27,13 +27,13 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -// WaitForClusterMachineNodesRefsInput is the input for WaitForClusterMachineNodesRefs. +// WaitForClusterMachineNodeRefsInput is the input for WaitForClusterMachineNodesRefs. type WaitForClusterMachineNodeRefsInput struct { GetLister GetLister Cluster *clusterv1.Cluster } -// WaitForClusterMachineNodesRefs waits until all nodes associated with a machine deployment exist. +// WaitForClusterMachineNodeRefs waits until all nodes associated with a machine deployment exist. 
func WaitForClusterMachineNodeRefs(ctx context.Context, input WaitForClusterMachineNodeRefsInput, intervals ...interface{}) { By("Waiting for the machines' nodes to exist") machines := &clusterv1.MachineList{} diff --git a/test/framework/namespace_helpers.go b/test/framework/namespace_helpers.go index c237f9642803..118854c8aac6 100644 --- a/test/framework/namespace_helpers.go +++ b/test/framework/namespace_helpers.go @@ -145,12 +145,12 @@ func WatchNamespaceEvents(ctx context.Context, input WatchNamespaceEventsInput) eventInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { e := obj.(*corev1.Event) - f.WriteString(fmt.Sprintf("[New Event] %s/%s\n\tresource: %s/%s/%s\n\treason: %s\n\tmessage: %s\n\tfull: %#v\n", + _, _ = f.WriteString(fmt.Sprintf("[New Event] %s/%s\n\tresource: %s/%s/%s\n\treason: %s\n\tmessage: %s\n\tfull: %#v\n", e.Namespace, e.Name, e.InvolvedObject.APIVersion, e.InvolvedObject.Kind, e.InvolvedObject.Name, e.Reason, e.Message, e)) }, UpdateFunc: func(_, obj interface{}) { e := obj.(*corev1.Event) - f.WriteString(fmt.Sprintf("[Updated Event] %s/%s\n\tresource: %s/%s/%s\n\treason: %s\n\tmessage: %s\n\tfull: %#v\n", + _, _ = f.WriteString(fmt.Sprintf("[Updated Event] %s/%s\n\tresource: %s/%s/%s\n\treason: %s\n\tmessage: %s\n\tfull: %#v\n", e.Namespace, e.Name, e.InvolvedObject.APIVersion, e.InvolvedObject.Kind, e.InvolvedObject.Name, e.Reason, e.Message, e)) }, DeleteFunc: func(obj interface{}) {}, diff --git a/test/framework/pod_helpers.go b/test/framework/pod_helpers.go index 10d3dd3c42e7..1c9f4382a2e9 100644 --- a/test/framework/pod_helpers.go +++ b/test/framework/pod_helpers.go @@ -27,14 +27,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -// podListCondition is a type that operates a condition on a Pod. -type podListCondition func(p *corev1.PodList) error +// PodListCondition is a type that operates a condition on a Pod. 
+type PodListCondition func(p *corev1.PodList) error // WaitForPodListConditionInput is the input args for WaitForPodListCondition. type WaitForPodListConditionInput struct { Lister Lister ListOptions *client.ListOptions - Condition podListCondition + Condition PodListCondition } // WaitForPodListCondition waits for the specified condition to be true for all @@ -58,7 +58,7 @@ func WaitForPodListCondition(ctx context.Context, input WaitForPodListConditionI // EtcdImageTagCondition returns a podListCondition that ensures the pod image // contains the specified image tag. -func EtcdImageTagCondition(expectedTag string, expectedCount int) podListCondition { +func EtcdImageTagCondition(expectedTag string, expectedCount int) PodListCondition { return func(pl *corev1.PodList) error { countWithCorrectTag := 0 for _, pod := range pl.Items { @@ -84,7 +84,7 @@ func EtcdImageTagCondition(expectedTag string, expectedCount int) podListConditi // PhasePodCondition is a podListCondition ensuring that pods are in the expected // pod phase. 
-func PhasePodCondition(expectedPhase corev1.PodPhase) podListCondition { +func PhasePodCondition(expectedPhase corev1.PodPhase) PodListCondition { return func(pl *corev1.PodList) error { for _, pod := range pl.Items { if pod.Status.Phase != expectedPhase { diff --git a/test/helpers/envtest.go b/test/helpers/envtest.go index 4c14607015fc..2bce88931205 100644 --- a/test/helpers/envtest.go +++ b/test/helpers/envtest.go @@ -46,7 +46,6 @@ import ( "sigs.k8s.io/cluster-api/controllers/external" kcpv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha4" addonv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1alpha4" - crs "sigs.k8s.io/cluster-api/exp/addons/api/v1alpha4" expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4" "sigs.k8s.io/cluster-api/util/kubeconfig" utilyaml "sigs.k8s.io/cluster-api/util/yaml" @@ -77,7 +76,6 @@ func init() { utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) utilruntime.Must(bootstrapv1.AddToScheme(scheme.Scheme)) utilruntime.Must(expv1.AddToScheme(scheme.Scheme)) - utilruntime.Must(crs.AddToScheme(scheme.Scheme)) utilruntime.Must(addonv1.AddToScheme(scheme.Scheme)) utilruntime.Must(kcpv1.AddToScheme(scheme.Scheme)) utilruntime.Must(admissionv1.AddToScheme(scheme.Scheme)) @@ -173,7 +171,7 @@ func NewTestEnvironment() *TestEnvironment { if err := (&kcpv1.KubeadmControlPlane{}).SetupWebhookWithManager(mgr); err != nil { klog.Fatalf("unable to create webhook: %+v", err) } - if err := (&crs.ClusterResourceSet{}).SetupWebhookWithManager(mgr); err != nil { + if err := (&addonv1.ClusterResourceSet{}).SetupWebhookWithManager(mgr); err != nil { klog.Fatalf("unable to create webhook for crs: %+v", err) } if err := (&expv1.MachinePool{}).SetupWebhookWithManager(mgr); err != nil { diff --git a/test/infrastructure/docker/cloudinit/writefiles.go b/test/infrastructure/docker/cloudinit/writefiles.go index 58affb0d2307..ab2adccb2c80 100644 --- a/test/infrastructure/docker/cloudinit/writefiles.go +++ b/test/infrastructure/docker/cloudinit/writefiles.go @@ 
-60,7 +60,7 @@ func (a *writeFilesAction) Commands() ([]Cmd, error) { commands := make([]Cmd, 0) for _, f := range a.Files { // Fix attributes and apply defaults - path := fixPath(f.Path) //NB. the real cloud init module for writes files converts path into absolute paths; this is not possible here... + path := fixPath(f.Path) // NB. the real cloud init module for writes files converts path into absolute paths; this is not possible here... encodings := fixEncoding(f.Encoding) owner := fixOwner(f.Owner) permissions := fixPermissions(f.Permissions) diff --git a/util/annotations/helpers_test.go b/util/annotations/helpers_test.go index 67376ef0a013..b6f7c119e860 100644 --- a/util/annotations/helpers_test.go +++ b/util/annotations/helpers_test.go @@ -17,10 +17,11 @@ limitations under the License. package annotations import ( + "testing" + . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "testing" ) func TestAddAnnotations(t *testing.T) { diff --git a/util/certs/consts.go b/util/certs/consts.go index 8e3ceddbb9e8..cdeef14aa3de 100644 --- a/util/certs/consts.go +++ b/util/certs/consts.go @@ -25,7 +25,7 @@ const ( // DefaultCertDuration is the default lifespan used when creating certificates. DefaultCertDuration = time.Hour * 24 * 365 - // When client certificates have less than ClientCertificateRenewalDuration - // left before expiry, they will be regenerated. + // ClientCertificateRenewalDuration determines when a certificate should + // be regerenated. ClientCertificateRenewalDuration = DefaultCertDuration / 2 ) diff --git a/util/collections/machine_filters_test.go b/util/collections/machine_filters_test.go index 4a5fc2ed49d4..7dc45137ebeb 100644 --- a/util/collections/machine_filters_test.go +++ b/util/collections/machine_filters_test.go @@ -17,12 +17,13 @@ limitations under the License. 
package collections_test import ( + "testing" + "time" + "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api/util/collections" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" - "testing" - "time" . "github.com/onsi/gomega" "sigs.k8s.io/cluster-api/util/conditions" diff --git a/util/conditions/getter_test.go b/util/conditions/getter_test.go index e2fc38f6f05b..9aef2a8c37e2 100644 --- a/util/conditions/getter_test.go +++ b/util/conditions/getter_test.go @@ -131,7 +131,7 @@ func TestSummary(t *testing.T) { foo := TrueCondition("foo") bar := FalseCondition("bar", "reason falseInfo1", clusterv1.ConditionSeverityInfo, "message falseInfo1") baz := FalseCondition("baz", "reason falseInfo2", clusterv1.ConditionSeverityInfo, "message falseInfo2") - existingReady := FalseCondition(clusterv1.ReadyCondition, "reason falseError1", clusterv1.ConditionSeverityError, "message falseError1") //NB. existing ready has higher priority than other conditions + existingReady := FalseCondition(clusterv1.ReadyCondition, "reason falseError1", clusterv1.ConditionSeverityError, "message falseError1") // NB. existing ready has higher priority than other conditions tests := []struct { name string @@ -239,7 +239,7 @@ func TestSummary(t *testing.T) { func TestAggregate(t *testing.T) { ready1 := TrueCondition(clusterv1.ReadyCondition) ready2 := FalseCondition(clusterv1.ReadyCondition, "reason falseInfo1", clusterv1.ConditionSeverityInfo, "message falseInfo1") - bar := FalseCondition("bar", "reason falseError1", clusterv1.ConditionSeverityError, "message falseError1") //NB. bar has higher priority than other conditions + bar := FalseCondition("bar", "reason falseError1", clusterv1.ConditionSeverityError, "message falseError1") // NB. 
bar has higher priority than other conditions tests := []struct { name string diff --git a/util/conditions/matcher.go b/util/conditions/matcher.go index 17fef9d5c98c..d9415b8fe4ff 100644 --- a/util/conditions/matcher.go +++ b/util/conditions/matcher.go @@ -19,7 +19,7 @@ package conditions import ( "fmt" - . "github.com/onsi/gomega" + "github.com/onsi/gomega" "github.com/onsi/gomega/types" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" ) @@ -41,7 +41,7 @@ func (m matchConditions) Match(actual interface{}) (success bool, err error) { elems = append(elems, MatchCondition(condition)) } - return ConsistOf(elems).Match(actual) + return gomega.ConsistOf(elems).Match(actual) } func (m matchConditions) FailureMessage(actual interface{}) (message string) { @@ -69,23 +69,23 @@ func (m matchCondition) Match(actual interface{}) (success bool, err error) { return false, fmt.Errorf("actual should be of type Condition") } - ok, err = Equal(m.expected.Type).Match(actualCondition.Type) + ok, err = gomega.Equal(m.expected.Type).Match(actualCondition.Type) if !ok { return ok, err } - ok, err = Equal(m.expected.Status).Match(actualCondition.Status) + ok, err = gomega.Equal(m.expected.Status).Match(actualCondition.Status) if !ok { return ok, err } - ok, err = Equal(m.expected.Severity).Match(actualCondition.Severity) + ok, err = gomega.Equal(m.expected.Severity).Match(actualCondition.Severity) if !ok { return ok, err } - ok, err = Equal(m.expected.Reason).Match(actualCondition.Reason) + ok, err = gomega.Equal(m.expected.Reason).Match(actualCondition.Reason) if !ok { return ok, err } - ok, err = Equal(m.expected.Message).Match(actualCondition.Message) + ok, err = gomega.Equal(m.expected.Message).Match(actualCondition.Message) if !ok { return ok, err } diff --git a/util/defaulting/defaulting.go b/util/defaulting/defaulting.go index fd637069c6f8..88f17c24626b 100644 --- a/util/defaulting/defaulting.go +++ b/util/defaulting/defaulting.go @@ -26,7 +26,7 @@ import ( // DefaultingValidator 
interface is for objects that define both defaulting // and validating webhooks. -type DefaultingValidator interface { +type DefaultingValidator interface { //nolint:golint admission.Defaulter admission.Validator } diff --git a/util/failuredomains/failure_domains_test.go b/util/failuredomains/failure_domains_test.go index e0bce8cac728..d6b358d8fcca 100644 --- a/util/failuredomains/failure_domains_test.go +++ b/util/failuredomains/failure_domains_test.go @@ -17,9 +17,10 @@ limitations under the License. package failuredomains import ( - "sigs.k8s.io/cluster-api/util/collections" "testing" + "sigs.k8s.io/cluster-api/util/collections" + . "github.com/onsi/gomega" "k8s.io/utils/pointer" diff --git a/util/kubeconfig/kubeconfig.go b/util/kubeconfig/kubeconfig.go index aa3d675e4638..4b4b91f4c546 100644 --- a/util/kubeconfig/kubeconfig.go +++ b/util/kubeconfig/kubeconfig.go @@ -38,6 +38,7 @@ import ( ) var ( + // ErrDependentCertificateNotFound signals that a CA secret could not be found. ErrDependentCertificateNotFound = errors.New("could not find secret ca") ) diff --git a/util/secret/certificates.go b/util/secret/certificates.go index e15b1387649a..73b926c9471c 100644 --- a/util/secret/certificates.go +++ b/util/secret/certificates.go @@ -43,6 +43,7 @@ import ( const ( rootOwnerValue = "root:root" + // DefaultCertificatesDir is the default directory where Kubernetes stores its PKI information. 
DefaultCertificatesDir = "/etc/kubernetes/pki" ) diff --git a/util/secret/certificates_test.go b/util/secret/certificates_test.go index be474c8b1291..e5ca3f6b3f6b 100644 --- a/util/secret/certificates_test.go +++ b/util/secret/certificates_test.go @@ -25,7 +25,7 @@ import ( "sigs.k8s.io/cluster-api/util/secret" ) -func TesNewControlPlaneJoinCerts_Stacked(t *testing.T) { +func TestNewControlPlaneJoinCertsStacked(t *testing.T) { g := NewWithT(t) config := &bootstrapv1.ClusterConfiguration{} @@ -33,7 +33,7 @@ func TesNewControlPlaneJoinCerts_Stacked(t *testing.T) { g.Expect(certs.GetByPurpose(secret.EtcdCA).KeyFile).NotTo(BeEmpty()) } -func TestNewControlPlaneJoinCerts_External(t *testing.T) { +func TestNewControlPlaneJoinCertsExternal(t *testing.T) { g := NewWithT(t) config := &bootstrapv1.ClusterConfiguration{ diff --git a/util/util.go b/util/util.go index f550874f0de3..ef56c443f66e 100644 --- a/util/util.go +++ b/util/util.go @@ -29,7 +29,6 @@ import ( "github.com/gobuffalo/flect" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -53,8 +52,14 @@ const ( ) var ( - rnd = rand.New(rand.NewSource(time.Now().UnixNano())) //nolint:gosec - ErrNoCluster = fmt.Errorf("no %q label present", clusterv1.ClusterLabelName) + rnd = rand.New(rand.NewSource(time.Now().UnixNano())) //nolint:gosec + + // ErrNoCluster is returned when the cluster + // label could not be found on the object passed in. + ErrNoCluster = fmt.Errorf("no %q label present", clusterv1.ClusterLabelName) + + // ErrUnstructuredFieldNotFound determines that a field + // in an unstructured object could not be found. ErrUnstructuredFieldNotFound = fmt.Errorf("field not found") ) @@ -127,10 +132,10 @@ func IsControlPlaneMachine(machine *clusterv1.Machine) bool { } // IsNodeReady returns true if a node is ready. 
-func IsNodeReady(node *v1.Node) bool { +func IsNodeReady(node *corev1.Node) bool { for _, condition := range node.Status.Conditions { - if condition.Type == v1.NodeReady { - return condition.Status == v1.ConditionTrue + if condition.Type == corev1.NodeReady { + return condition.Status == corev1.ConditionTrue } } diff --git a/util/version/version_test.go b/util/version/version_test.go index 40d5a163a823..3ed67edc5ed2 100644 --- a/util/version/version_test.go +++ b/util/version/version_test.go @@ -17,9 +17,10 @@ limitations under the License. package version import ( + "testing" + "github.com/blang/semver" . "github.com/onsi/gomega" - "testing" ) func TestParseMajorMinorPatch(t *testing.T) { diff --git a/util/yaml/yaml_test.go b/util/yaml/yaml_test.go index e7170471ca46..d20c2a4889b7 100644 --- a/util/yaml/yaml_test.go +++ b/util/yaml/yaml_test.go @@ -351,7 +351,7 @@ func TestToUnstructured(t *testing.T) { { name: "empty object are dropped", args: args{ - rawyaml: []byte("---\n" + //empty objects before + rawyaml: []byte("---\n" + // empty objects before "---\n" + "---\n" + "apiVersion: v1\n" + @@ -361,7 +361,7 @@ func TestToUnstructured(t *testing.T) { "---\n" + "apiVersion: v1\n" + "kind: Secret\n" + - "---\n" + //empty objects after + "---\n" + // empty objects after "---\n" + "---\n"), },