From 4600a494731ac250c781c7253dc32978ed251a5d Mon Sep 17 00:00:00 2001
From: Oscar Utbult
Date: Sat, 17 Sep 2022 18:38:44 +0200
Subject: [PATCH] grammar: replace all occurrences of "the the" with "the"

---
 Dockerfile                                           | 2 +-
 Makefile                                             | 6 +++---
 cmd/clusterctl/Dockerfile                            | 2 +-
 cmd/clusterctl/client/client.go                      | 4 ++--
 cmd/clusterctl/client/cluster/components.go          | 2 +-
 cmd/clusterctl/client/cluster/mover.go               | 4 ++--
 cmd/clusterctl/client/cluster/objectgraph.go         | 2 +-
 cmd/clusterctl/client/cluster/upgrader.go            | 4 ++--
 cmd/clusterctl/internal/test/fake_proxy.go           | 2 +-
 docs/proposals/20200804-windows-support.md           | 4 ++--
 internal/controllers/topology/cluster/scope/state.go | 2 +-
 test/e2e/e2e_suite_test.go                           | 2 +-
 test/extension/Dockerfile                            | 2 +-
 test/framework/alltypes_helpers.go                   | 2 +-
 test/framework/controlplane_helpers.go               | 2 +-
 util/conditions/getter.go                            | 2 +-
 util/conditions/merge.go                             | 8 ++++----
 util/conditions/setter.go                            | 4 ++--
 18 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index bac182f842e2..118d2f9fb921 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -45,7 +45,7 @@ RUN --mount=type=cache,target=/go/pkg/mod \
 # Copy the sources
 COPY ./ ./
 
-# Cache the go build into the the Go’s compiler cache folder so we take benefits of compiler caching across docker build calls
+# Cache the go build into the Go’s compiler cache folder so we take benefits of compiler caching across docker build calls
 RUN --mount=type=cache,target=/root/.cache/go-build \
     --mount=type=cache,target=/go/pkg/mod \
     go build .
diff --git a/Makefile b/Makefile
index 77fe4728a7e4..e134e5a31243 100644
--- a/Makefile
+++ b/Makefile
@@ -921,7 +921,7 @@ docker-push-manifest-core: ## Push the multiarch manifest for the core docker im
 	$(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./config/default/manager_pull_policy.yaml"
 
 .PHONY: docker-push-manifest-kubeadm-bootstrap
-docker-push-manifest-kubeadm-bootstrap: ## Push the multiarch manifest for the the kubeadm bootstrap docker images
+docker-push-manifest-kubeadm-bootstrap: ## Push the multiarch manifest for the kubeadm bootstrap docker images
 	## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
 	docker manifest create --amend $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(KUBEADM_BOOTSTRAP_CONTROLLER_IMG)\-&:$(TAG)~g")
 	@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${KUBEADM_BOOTSTRAP_CONTROLLER_IMG}:${TAG} ${KUBEADM_BOOTSTRAP_CONTROLLER_IMG}-$${arch}:${TAG}; done
@@ -930,7 +930,7 @@ docker-push-manifest-kubeadm-bootstrap: ## Push the multiarch manifest for the t
 	$(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./bootstrap/kubeadm/config/default/manager_pull_policy.yaml"
 
 .PHONY: docker-push-manifest-kubeadm-control-plane
-docker-push-manifest-kubeadm-control-plane: ## Push the multiarch manifest for the the kubeadm control plane docker images
+docker-push-manifest-kubeadm-control-plane: ## Push the multiarch manifest for the kubeadm control plane docker images
 	## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
 	docker manifest create --amend $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG)\-&:$(TAG)~g")
 	@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${KUBEADM_CONTROL_PLANE_CONTROLLER_IMG}:${TAG} ${KUBEADM_CONTROL_PLANE_CONTROLLER_IMG}-$${arch}:${TAG}; done
@@ -939,7 +939,7 @@ docker-push-manifest-kubeadm-control-plane: ## Push the multiarch manifest for t
 	$(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./controlplane/kubeadm/config/default/manager_pull_policy.yaml"
 
 .PHONY: docker-push-manifest-capd
-docker-capd-push-manifest: ## Push the multiarch manifest for the the capd docker images
+docker-capd-push-manifest: ## Push the multiarch manifest for the capd docker images
 	## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
 	docker manifest create --amend $(CAPD_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(CAPD_CONTROLLER_IMG)\-&:$(TAG)~g")
 	@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${CAPD_CONTROLLER_IMG}:${TAG} ${CAPD_CONTROLLER_IMG}-$${arch}:${TAG}; done
diff --git a/cmd/clusterctl/Dockerfile b/cmd/clusterctl/Dockerfile
index a94af0eb7df4..5057608650c0 100644
--- a/cmd/clusterctl/Dockerfile
+++ b/cmd/clusterctl/Dockerfile
@@ -45,7 +45,7 @@ RUN --mount=type=cache,target=/go/pkg/mod \
 # Copy the sources
 COPY ./ ./
 
-# Cache the go build into the the Go’s compiler cache folder so we take benefits of compiler caching across docker build calls
+# Cache the go build into the Go’s compiler cache folder so we take benefits of compiler caching across docker build calls
 RUN --mount=type=cache,target=/root/.cache/go-build \
     --mount=type=cache,target=/go/pkg/mod \
     go build ./cmd/clusterctl
diff --git a/cmd/clusterctl/client/client.go b/cmd/clusterctl/client/client.go
index 5747dd5ac68f..2425ad8460f8 100644
--- a/cmd/clusterctl/client/client.go
+++ b/cmd/clusterctl/client/client.go
@@ -61,8 +61,8 @@ type Client interface {
 	Restore(options RestoreOptions) error
 
 	// PlanUpgrade returns a set of suggested Upgrade plans for the cluster, and more specifically:
-	// - Upgrade to the latest version in the the v1alpha3 series: ....
-	// - Upgrade to the latest version in the the v1alpha4 series: ....
+	// - Upgrade to the latest version in the v1alpha3 series: ....
+	// - Upgrade to the latest version in the v1alpha4 series: ....
 	PlanUpgrade(options PlanUpgradeOptions) ([]UpgradePlan, error)
 
 	// PlanCertManagerUpgrade returns a CertManagerUpgradePlan.
diff --git a/cmd/clusterctl/client/cluster/components.go b/cmd/clusterctl/client/cluster/components.go
index 09be0b8275dc..3fe8dd69e479 100644
--- a/cmd/clusterctl/client/cluster/components.go
+++ b/cmd/clusterctl/client/cluster/components.go
@@ -181,7 +181,7 @@ func (p *providerComponents) Delete(options DeleteOptions) error {
 		// If the resource is a cluster resource, skip it if the resource name does not start with the instance prefix.
 		// This is required because there are cluster resources like e.g. ClusterRoles and ClusterRoleBinding, which are instance specific;
 		// During the installation, clusterctl adds the instance namespace prefix to such resources (see fixRBAC), and so we can rely
-		// on that for deleting only the global resources belonging the the instance we are processing.
+		// on that for deleting only the global resources belonging the instance we are processing.
 		// NOTE: namespace and CRD are special case managed above; webhook instead goes hand by hand with the controller they
 		// should always be deleted.
 		isWebhook := obj.GroupVersionKind().Kind == validatingWebhookConfigurationKind || obj.GroupVersionKind().Kind == mutatingWebhookConfigurationKind
diff --git a/cmd/clusterctl/client/cluster/mover.go b/cmd/clusterctl/client/cluster/mover.go
index fd919fc5e5bc..a4717d0fc449 100644
--- a/cmd/clusterctl/client/cluster/mover.go
+++ b/cmd/clusterctl/client/cluster/mover.go
@@ -269,7 +269,7 @@ func (o *objectMover) checkProvisioningCompleted(graph *objectGraph) error {
 	return kerrors.NewAggregate(errList)
 }
 
-// getClusterObj retrieves the the clusterObj corresponding to a node with type Cluster.
+// getClusterObj retrieves the clusterObj corresponding to a node with type Cluster.
 func getClusterObj(proxy Proxy, cluster *node, clusterObj *clusterv1.Cluster) error {
 	c, err := proxy.NewClient()
 	if err != nil {
@@ -287,7 +287,7 @@ func getClusterObj(proxy Proxy, cluster *node, clusterObj *clusterv1.Cluster) er
 	return nil
 }
 
-// getMachineObj retrieves the the machineObj corresponding to a node with type Machine.
+// getMachineObj retrieves the machineObj corresponding to a node with type Machine.
 func getMachineObj(proxy Proxy, machine *node, machineObj *clusterv1.Machine) error {
 	c, err := proxy.NewClient()
 	if err != nil {
diff --git a/cmd/clusterctl/client/cluster/objectgraph.go b/cmd/clusterctl/client/cluster/objectgraph.go
index c688d0fc1466..dff48251b067 100644
--- a/cmd/clusterctl/client/cluster/objectgraph.go
+++ b/cmd/clusterctl/client/cluster/objectgraph.go
@@ -309,7 +309,7 @@ func (o *objectGraph) objMetaToNode(obj *unstructured.Unstructured, n *node) {
 	}
 }
 
-// getDiscoveryTypes returns the list of TypeMeta to be considered for the the move discovery phase.
+// getDiscoveryTypes returns the list of TypeMeta to be considered for the move discovery phase.
 // This list includes all the types defines by the CRDs installed by clusterctl and the ConfigMap/Secret core types.
 func (o *objectGraph) getDiscoveryTypes() error {
 	crdList := &apiextensionsv1.CustomResourceDefinitionList{}
diff --git a/cmd/clusterctl/client/cluster/upgrader.go b/cmd/clusterctl/client/cluster/upgrader.go
index 23aa882aa910..772be599eb4b 100644
--- a/cmd/clusterctl/client/cluster/upgrader.go
+++ b/cmd/clusterctl/client/cluster/upgrader.go
@@ -39,8 +39,8 @@ import (
 // ProviderUpgrader defines methods for supporting provider upgrade.
 type ProviderUpgrader interface {
 	// Plan returns a set of suggested Upgrade plans for the management cluster, and more specifically:
-	// - Upgrade to the latest version in the the v1alpha3 series: ....
-	// - Upgrade to the latest version in the the v1alpha4 series: ....
+	// - Upgrade to the latest version in the v1alpha3 series: ....
+	// - Upgrade to the latest version in the v1alpha4 series: ....
 	Plan() ([]UpgradePlan, error)
 
 	// ApplyPlan executes an upgrade following an UpgradePlan generated by clusterctl.
diff --git a/cmd/clusterctl/internal/test/fake_proxy.go b/cmd/clusterctl/internal/test/fake_proxy.go
index f4b40bf7fa23..c3273f4f11bb 100644
--- a/cmd/clusterctl/internal/test/fake_proxy.go
+++ b/cmd/clusterctl/internal/test/fake_proxy.go
@@ -162,7 +162,7 @@ func (f *FakeProxy) WithNamespace(n string) *FakeProxy {
 
 // WithProviderInventory can be used as a fast track for setting up test scenarios requiring an already initialized management cluster.
 // NB. this method adds an items to the Provider inventory, but it doesn't install the corresponding provider; if the
-// test case requires the actual provider to be installed, use the the fake client to install both the provider
+// test case requires the actual provider to be installed, use the fake client to install both the provider
 // components and the corresponding inventory item.
 func (f *FakeProxy) WithProviderInventory(name string, providerType clusterctlv1.ProviderType, version, targetNamespace string) *FakeProxy {
 	f.objs = append(f.objs, &clusterctlv1.Provider{
diff --git a/docs/proposals/20200804-windows-support.md b/docs/proposals/20200804-windows-support.md
index 5b74ffc26f55..eb54c27b2ebc 100644
--- a/docs/proposals/20200804-windows-support.md
+++ b/docs/proposals/20200804-windows-support.md
@@ -115,7 +115,7 @@ the [image-builder project](https://github.com/kubernetes-sigs/image-builder) th
 built need to adhere to [Windows licensing requirements](https://www.microsoft.com/en-us/licensing/product-licensing/windows-server).
 
 There is prior art for building Windows base images. For example, AKS-Engine has an example implementation for using packer and scripts to do image configuration: https://github.com/Azure/aks-engine/blob/master/vhd/packer/windows-vhd-builder.json.
-Another example is the the [sig-windows-tools](https://github.com/kubernetes-sigs/sig-windows-tools) which provide scripts for image configuration when using Kubeadm.
+Another example is the [sig-windows-tools](https://github.com/kubernetes-sigs/sig-windows-tools) which provide scripts for image configuration when using Kubeadm.
 
 Although the Linux implementation in image-builder uses Ansible for configuration, Windows isn't going to share the same configuration because [Ansible](https://docs.ansible.com/ansible/latest/user_guide/windows.html) requires
 [Windows specific modules](https://docs.ansible.com/ansible/2.9/modules/list_of_windows_modules.html) to do the configuration.
@@ -131,7 +131,7 @@ and use privileged containers in place of wins.exe enabled containers.
 
 Each infrastructure providers must provide their own `PreKubeadmCommands`/`PostKubeadmCommands` scripts
 that are required for additional configuration for the node. During planning for Beta we will be able to identify
-common overlapping features that can be added into the the base images in image-builder and for re-use
+common overlapping features that can be added into the base images in image-builder and for re-use
 
 #### netbios names
 
diff --git a/internal/controllers/topology/cluster/scope/state.go b/internal/controllers/topology/cluster/scope/state.go
index d1ab8aaa1453..6c41b4b3299e 100644
--- a/internal/controllers/topology/cluster/scope/state.go
+++ b/internal/controllers/topology/cluster/scope/state.go
@@ -92,7 +92,7 @@ type MachineDeploymentState struct {
 
 // IsRollingOut determines if the machine deployment is upgrading.
 // A machine deployment is considered upgrading if:
-// - if any of the replicas of the the machine deployment is not ready.
+// - if any of the replicas of the machine deployment is not ready.
 func (md *MachineDeploymentState) IsRollingOut() bool {
 	return !mdutil.DeploymentComplete(md.Object, &md.Object.Status) || *md.Object.Spec.Replicas != md.Object.Status.ReadyReplicas
 }
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
index 492d67462c63..d1ab2702be6a 100644
--- a/test/e2e/e2e_suite_test.go
+++ b/test/e2e/e2e_suite_test.go
@@ -74,7 +74,7 @@ var (
 	// with the providers specified in the configPath.
 	clusterctlConfigPath string
 
-	// bootstrapClusterProvider manages provisioning of the the bootstrap cluster to be used for the e2e tests.
+	// bootstrapClusterProvider manages provisioning of the bootstrap cluster to be used for the e2e tests.
 	// Please note that provisioning will be skipped if e2e.use-existing-cluster is provided.
 	bootstrapClusterProvider bootstrap.ClusterProvider
 
diff --git a/test/extension/Dockerfile b/test/extension/Dockerfile
index 1f93fa8380fc..2c1e02528f03 100644
--- a/test/extension/Dockerfile
+++ b/test/extension/Dockerfile
@@ -45,7 +45,7 @@ RUN --mount=type=cache,target=/go/pkg/mod \
 # Copy the sources
 COPY ./ ./
 
-# Cache the go build into the the Go’s compiler cache folder so we take benefits of compiler caching across docker build calls
+# Cache the go build into the Go’s compiler cache folder so we take benefits of compiler caching across docker build calls
 RUN --mount=type=cache,target=/root/.cache/go-build \
     --mount=type=cache,target=/go/pkg/mod \
     go build .
diff --git a/test/framework/alltypes_helpers.go b/test/framework/alltypes_helpers.go
index b28f3e3562bd..fa6ff095fd57 100644
--- a/test/framework/alltypes_helpers.go
+++ b/test/framework/alltypes_helpers.go
@@ -76,7 +76,7 @@ func GetCAPIResources(ctx context.Context, input GetCAPIResourcesInput) []*unstr
 	return objList
 }
 
-// getClusterAPITypes returns the list of TypeMeta to be considered for the the move discovery phase.
+// getClusterAPITypes returns the list of TypeMeta to be considered for the move discovery phase.
 // This list includes all the types belonging to CAPI providers.
 func getClusterAPITypes(ctx context.Context, lister Lister) []metav1.TypeMeta {
 	discoveredTypes := []metav1.TypeMeta{}
diff --git a/test/framework/controlplane_helpers.go b/test/framework/controlplane_helpers.go
index baacc92c8aa8..78b704144534 100644
--- a/test/framework/controlplane_helpers.go
+++ b/test/framework/controlplane_helpers.go
@@ -193,7 +193,7 @@ func AssertControlPlaneFailureDomains(ctx context.Context, input AssertControlPl
 	Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling AssertControlPlaneFailureDomains")
 	Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling AssertControlPlaneFailureDomains")
 
-	By("Checking all the the control plane machines are in the expected failure domains")
+	By("Checking all the control plane machines are in the expected failure domains")
 	controlPlaneFailureDomains := sets.NewString()
 	for fd, fdSettings := range input.Cluster.Status.FailureDomains {
 		if fdSettings.ControlPlane {
diff --git a/util/conditions/getter.go b/util/conditions/getter.go
index 6bd866d8c8f0..0ff89940abaa 100644
--- a/util/conditions/getter.go
+++ b/util/conditions/getter.go
@@ -234,7 +234,7 @@ func mirror(from Getter, targetCondition clusterv1.ConditionType, options ...Mir
 	return condition
 }
 
-// Aggregates all the the Ready condition from a list of dependent objects into the target object;
+// Aggregates all the Ready condition from a list of dependent objects into the target object;
 // if the Ready condition does not exists in one of the source object, the object is excluded from
 // the aggregation; if none of the source object have ready condition, no target conditions is generated.
 func aggregate(from []Getter, targetCondition clusterv1.ConditionType, options ...MergeOption) *clusterv1.Condition {
diff --git a/util/conditions/merge.go b/util/conditions/merge.go
index a3aa304b6e53..d2938c06ddbb 100644
--- a/util/conditions/merge.go
+++ b/util/conditions/merge.go
@@ -132,7 +132,7 @@ func (g conditionGroups) Swap(i, j int) {
 	g[i], g[j] = g[j], g[i]
 }
 
-// TopGroup returns the the condition group with the highest mergePriority.
+// TopGroup returns the condition group with the highest mergePriority.
 func (g conditionGroups) TopGroup() *conditionGroup {
 	if len(g) == 0 {
 		return nil
@@ -140,17 +140,17 @@ func (g conditionGroups) TopGroup() *conditionGroup {
 	return &g[0]
 }
 
-// TrueGroup returns the the condition group with status True, if any.
+// TrueGroup returns the condition group with status True, if any.
 func (g conditionGroups) TrueGroup() *conditionGroup {
 	return g.getByStatusAndSeverity(corev1.ConditionTrue, clusterv1.ConditionSeverityNone)
 }
 
-// ErrorGroup returns the the condition group with status False and severity Error, if any.
+// ErrorGroup returns the condition group with status False and severity Error, if any.
 func (g conditionGroups) ErrorGroup() *conditionGroup {
 	return g.getByStatusAndSeverity(corev1.ConditionFalse, clusterv1.ConditionSeverityError)
 }
 
-// WarningGroup returns the the condition group with status False and severity Warning, if any.
+// WarningGroup returns the condition group with status False and severity Warning, if any.
 func (g conditionGroups) WarningGroup() *conditionGroup {
 	return g.getByStatusAndSeverity(corev1.ConditionFalse, clusterv1.ConditionSeverityWarning)
 }
diff --git a/util/conditions/setter.go b/util/conditions/setter.go
index 94755f4b3b3e..c882b054d062 100644
--- a/util/conditions/setter.go
+++ b/util/conditions/setter.go
@@ -127,13 +127,13 @@ func SetSummary(to Setter, options ...MergeOption) {
 	Set(to, summary(to, options...))
 }
 
-// SetMirror creates a new condition by mirroring the the Ready condition from a dependent object;
+// SetMirror creates a new condition by mirroring the Ready condition from a dependent object;
 // if the Ready condition does not exists in the source object, no target conditions is generated.
 func SetMirror(to Setter, targetCondition clusterv1.ConditionType, from Getter, options ...MirrorOptions) {
 	Set(to, mirror(from, targetCondition, options...))
 }
 
-// SetAggregate creates a new condition with the aggregation of all the the Ready condition
+// SetAggregate creates a new condition with the aggregation of all the Ready condition
 // from a list of dependent objects; if the Ready condition does not exists in one of the source object,
 // the object is excluded from the aggregation; if none of the source object have ready condition,
 // no target conditions is generated.