
grammar: replace all occurrences of "the the" with "the"
oscr committed Sep 17, 2022
1 parent d82df1b commit 4600a49
Showing 18 changed files with 28 additions and 28 deletions.
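A change like this is typically produced mechanically rather than line by line. The following is a minimal Go sketch of the same substitution applied across a source tree; it is an illustration only, not the tooling actually used for this commit, and the choice to skip only the .git directory is an assumption.

package main

import (
	"bytes"
	"io/fs"
	"os"
	"path/filepath"
)

func main() {
	// Walk the working tree and rewrite every file containing "the the".
	err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			// Skip version-control metadata; generated or vendored
			// directories could be skipped the same way.
			if d.Name() == ".git" {
				return filepath.SkipDir
			}
			return nil
		}
		data, err := os.ReadFile(path)
		if err != nil {
			return err
		}
		fixed := bytes.ReplaceAll(data, []byte("the the"), []byte("the"))
		if bytes.Equal(fixed, data) {
			return nil // nothing to fix in this file
		}
		return os.WriteFile(path, fixed, 0o644)
	})
	if err != nil {
		panic(err)
	}
}

The 28 additions and 28 deletions below are consistent with a literal substitution like this, followed by a manual review of the resulting diff.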
2 changes: 1 addition & 1 deletion Dockerfile
@@ -45,7 +45,7 @@ RUN --mount=type=cache,target=/go/pkg/mod \
# Copy the sources
COPY ./ ./

-# Cache the go build into the the Go’s compiler cache folder so we take benefits of compiler caching across docker build calls
+# Cache the go build into the Go’s compiler cache folder so we take benefits of compiler caching across docker build calls
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
go build .
6 changes: 3 additions & 3 deletions Makefile
@@ -921,7 +921,7 @@ docker-push-manifest-core: ## Push the multiarch manifest for the core docker im
$(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./config/default/manager_pull_policy.yaml"

.PHONY: docker-push-manifest-kubeadm-bootstrap
-docker-push-manifest-kubeadm-bootstrap: ## Push the multiarch manifest for the the kubeadm bootstrap docker images
+docker-push-manifest-kubeadm-bootstrap: ## Push the multiarch manifest for the kubeadm bootstrap docker images
## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
docker manifest create --amend $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(KUBEADM_BOOTSTRAP_CONTROLLER_IMG)\-&:$(TAG)~g")
@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${KUBEADM_BOOTSTRAP_CONTROLLER_IMG}:${TAG} ${KUBEADM_BOOTSTRAP_CONTROLLER_IMG}-$${arch}:${TAG}; done
@@ -930,7 +930,7 @@ docker-push-manifest-kubeadm-bootstrap: ## Push the multiarch manifest for the t
$(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./bootstrap/kubeadm/config/default/manager_pull_policy.yaml"

.PHONY: docker-push-manifest-kubeadm-control-plane
-docker-push-manifest-kubeadm-control-plane: ## Push the multiarch manifest for the the kubeadm control plane docker images
+docker-push-manifest-kubeadm-control-plane: ## Push the multiarch manifest for the kubeadm control plane docker images
## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
docker manifest create --amend $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG)\-&:$(TAG)~g")
@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${KUBEADM_CONTROL_PLANE_CONTROLLER_IMG}:${TAG} ${KUBEADM_CONTROL_PLANE_CONTROLLER_IMG}-$${arch}:${TAG}; done
@@ -939,7 +939,7 @@ docker-push-manifest-kubeadm-control-plane: ## Push the multiarch manifest for t
$(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./controlplane/kubeadm/config/default/manager_pull_policy.yaml"

.PHONY: docker-push-manifest-capd
-docker-capd-push-manifest: ## Push the multiarch manifest for the the capd docker images
+docker-capd-push-manifest: ## Push the multiarch manifest for the capd docker images
## Minimum docker version 18.06.0 is required for creating and pushing manifest images.
docker manifest create --amend $(CAPD_CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(CAPD_CONTROLLER_IMG)\-&:$(TAG)~g")
@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${CAPD_CONTROLLER_IMG}:${TAG} ${CAPD_CONTROLLER_IMG}-$${arch}:${TAG}; done
2 changes: 1 addition & 1 deletion cmd/clusterctl/Dockerfile
@@ -45,7 +45,7 @@ RUN --mount=type=cache,target=/go/pkg/mod \
# Copy the sources
COPY ./ ./

-# Cache the go build into the the Go’s compiler cache folder so we take benefits of compiler caching across docker build calls
+# Cache the go build into the Go’s compiler cache folder so we take benefits of compiler caching across docker build calls
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
go build ./cmd/clusterctl
4 changes: 2 additions & 2 deletions cmd/clusterctl/client/client.go
@@ -61,8 +61,8 @@ type Client interface {
Restore(options RestoreOptions) error

// PlanUpgrade returns a set of suggested Upgrade plans for the cluster, and more specifically:
-// - Upgrade to the latest version in the the v1alpha3 series: ....
-// - Upgrade to the latest version in the the v1alpha4 series: ....
+// - Upgrade to the latest version in the v1alpha3 series: ....
+// - Upgrade to the latest version in the v1alpha4 series: ....
PlanUpgrade(options PlanUpgradeOptions) ([]UpgradePlan, error)

// PlanCertManagerUpgrade returns a CertManagerUpgradePlan.
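As a rough usage sketch for the PlanUpgrade method shown above, assuming the clusterctl client package as of this commit (the empty config path, the default kubeconfig, and printing plans with %+v are illustrative choices, not part of the API shown in the diff):

package main

import (
	"fmt"
	"log"

	"sigs.k8s.io/cluster-api/cmd/clusterctl/client"
)

func main() {
	// An empty path makes clusterctl fall back to its default config lookup.
	c, err := client.New("")
	if err != nil {
		log.Fatal(err)
	}
	// An empty Kubeconfig selects the default kubeconfig and current context.
	plans, err := c.PlanUpgrade(client.PlanUpgradeOptions{Kubeconfig: client.Kubeconfig{}})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range plans {
		fmt.Printf("%+v\n", p) // one suggested plan per contract, e.g. the v1alpha4 series
	}
}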
2 changes: 1 addition & 1 deletion cmd/clusterctl/client/cluster/components.go
@@ -181,7 +181,7 @@ func (p *providerComponents) Delete(options DeleteOptions) error {
// If the resource is a cluster resource, skip it if the resource name does not start with the instance prefix.
// This is required because there are cluster resources like e.g. ClusterRoles and ClusterRoleBinding, which are instance specific;
// During the installation, clusterctl adds the instance namespace prefix to such resources (see fixRBAC), and so we can rely
-// on that for deleting only the global resources belonging the the instance we are processing.
+// on that for deleting only the global resources belonging the instance we are processing.
// NOTE: namespace and CRD are special case managed above; webhook instead goes hand by hand with the controller they
// should always be deleted.
isWebhook := obj.GroupVersionKind().Kind == validatingWebhookConfigurationKind || obj.GroupVersionKind().Kind == mutatingWebhookConfigurationKind
4 changes: 2 additions & 2 deletions cmd/clusterctl/client/cluster/mover.go
@@ -269,7 +269,7 @@ func (o *objectMover) checkProvisioningCompleted(graph *objectGraph) error {
return kerrors.NewAggregate(errList)
}

-// getClusterObj retrieves the the clusterObj corresponding to a node with type Cluster.
+// getClusterObj retrieves the clusterObj corresponding to a node with type Cluster.
func getClusterObj(proxy Proxy, cluster *node, clusterObj *clusterv1.Cluster) error {
c, err := proxy.NewClient()
if err != nil {
@@ -287,7 +287,7 @@ func getClusterObj(proxy Proxy, cluster *node, clusterObj *clusterv1.Cluster) er
return nil
}

-// getMachineObj retrieves the the machineObj corresponding to a node with type Machine.
+// getMachineObj retrieves the machineObj corresponding to a node with type Machine.
func getMachineObj(proxy Proxy, machine *node, machineObj *clusterv1.Machine) error {
c, err := proxy.NewClient()
if err != nil {
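Both helpers above share one pattern: obtain a controller-runtime client from the proxy, then fetch the typed object by namespace and name. A standalone sketch of that retrieval step (the client construction is elided, and the function name is mine, not the repository's):

package sketch

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// getClusterByName fetches a Cluster into clusterObj, mirroring what getClusterObj
// does once proxy.NewClient() has produced a controller-runtime client.
func getClusterByName(ctx context.Context, c ctrlclient.Client, namespace, name string, clusterObj *clusterv1.Cluster) error {
	key := ctrlclient.ObjectKey{Namespace: namespace, Name: name}
	return c.Get(ctx, key, clusterObj)
}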
2 changes: 1 addition & 1 deletion cmd/clusterctl/client/cluster/objectgraph.go
@@ -309,7 +309,7 @@ func (o *objectGraph) objMetaToNode(obj *unstructured.Unstructured, n *node) {
}
}

-// getDiscoveryTypes returns the list of TypeMeta to be considered for the the move discovery phase.
+// getDiscoveryTypes returns the list of TypeMeta to be considered for the move discovery phase.
// This list includes all the types defines by the CRDs installed by clusterctl and the ConfigMap/Secret core types.
func (o *objectGraph) getDiscoveryTypes() error {
crdList := &apiextensionsv1.CustomResourceDefinitionList{}
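For context on the comment above: the discovery list is derived from the CustomResourceDefinitions installed in the cluster. A condensed sketch of that derivation, with error handling trimmed and clusterctl's label filtering of its own CRDs omitted (so this is deliberately broader than the real getDiscoveryTypes):

package sketch

import (
	"context"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// discoveryTypes turns every served version of every installed CRD into a TypeMeta,
// the shape the move discovery phase consumes.
func discoveryTypes(ctx context.Context, c ctrlclient.Client) ([]metav1.TypeMeta, error) {
	crdList := &apiextensionsv1.CustomResourceDefinitionList{}
	if err := c.List(ctx, crdList); err != nil {
		return nil, err
	}
	var types []metav1.TypeMeta
	for _, crd := range crdList.Items {
		for _, version := range crd.Spec.Versions {
			if !version.Served {
				continue
			}
			types = append(types, metav1.TypeMeta{
				Kind:       crd.Spec.Names.Kind,
				APIVersion: crd.Spec.Group + "/" + version.Name,
			})
		}
	}
	return types, nil
}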
4 changes: 2 additions & 2 deletions cmd/clusterctl/client/cluster/upgrader.go
@@ -39,8 +39,8 @@ import (
// ProviderUpgrader defines methods for supporting provider upgrade.
type ProviderUpgrader interface {
// Plan returns a set of suggested Upgrade plans for the management cluster, and more specifically:
-// - Upgrade to the latest version in the the v1alpha3 series: ....
-// - Upgrade to the latest version in the the v1alpha4 series: ....
+// - Upgrade to the latest version in the v1alpha3 series: ....
+// - Upgrade to the latest version in the v1alpha4 series: ....
Plan() ([]UpgradePlan, error)

// ApplyPlan executes an upgrade following an UpgradePlan generated by clusterctl.
2 changes: 1 addition & 1 deletion cmd/clusterctl/internal/test/fake_proxy.go
@@ -162,7 +162,7 @@ func (f *FakeProxy) WithNamespace(n string) *FakeProxy {

// WithProviderInventory can be used as a fast track for setting up test scenarios requiring an already initialized management cluster.
// NB. this method adds an items to the Provider inventory, but it doesn't install the corresponding provider; if the
-// test case requires the actual provider to be installed, use the the fake client to install both the provider
+// test case requires the actual provider to be installed, use the fake client to install both the provider
// components and the corresponding inventory item.
func (f *FakeProxy) WithProviderInventory(name string, providerType clusterctlv1.ProviderType, version, targetNamespace string) *FakeProxy {
f.objs = append(f.objs, &clusterctlv1.Provider{
4 changes: 2 additions & 2 deletions docs/proposals/20200804-windows-support.md
@@ -115,7 +115,7 @@ the [image-builder project](https://github.com/kubernetes-sigs/image-builder) th
built need to adhere to [Windows licensing requirements](https://www.microsoft.com/en-us/licensing/product-licensing/windows-server).

There is prior art for building Windows base images. For example, AKS-Engine has an example implementation for using packer and scripts to do image configuration: https://github.com/Azure/aks-engine/blob/master/vhd/packer/windows-vhd-builder.json.
-Another example is the the [sig-windows-tools](https://github.com/kubernetes-sigs/sig-windows-tools) which provide scripts for image configuration when using Kubeadm.
+Another example is the [sig-windows-tools](https://github.com/kubernetes-sigs/sig-windows-tools) which provide scripts for image configuration when using Kubeadm.

Although the Linux implementation in image-builder uses Ansible for configuration, Windows isn't going to share
the same configuration because [Ansible](https://docs.ansible.com/ansible/latest/user_guide/windows.html) requires [Windows specific modules](https://docs.ansible.com/ansible/2.9/modules/list_of_windows_modules.html) to do the configuration.
@@ -131,7 +131,7 @@ and use privileged containers in place of wins.exe enabled containers.

Each infrastructure providers must provide their own `PreKubeadmCommands`/`PostKubeadmCommands` scripts that
are required for additional configuration for the node. During planning for Beta we will be able to identify
-common overlapping features that can be added into the the base images in image-builder and for re-use
+common overlapping features that can be added into the base images in image-builder and for re-use

#### netbios names

2 changes: 1 addition & 1 deletion internal/controllers/topology/cluster/scope/state.go
@@ -92,7 +92,7 @@ type MachineDeploymentState struct {

// IsRollingOut determines if the machine deployment is upgrading.
// A machine deployment is considered upgrading if:
-// - if any of the replicas of the the machine deployment is not ready.
+// - if any of the replicas of the machine deployment is not ready.
func (md *MachineDeploymentState) IsRollingOut() bool {
return !mdutil.DeploymentComplete(md.Object, &md.Object.Status) || *md.Object.Spec.Replicas != md.Object.Status.ReadyReplicas
}
2 changes: 1 addition & 1 deletion test/e2e/e2e_suite_test.go
@@ -74,7 +74,7 @@ var (
// with the providers specified in the configPath.
clusterctlConfigPath string

-// bootstrapClusterProvider manages provisioning of the the bootstrap cluster to be used for the e2e tests.
+// bootstrapClusterProvider manages provisioning of the bootstrap cluster to be used for the e2e tests.
// Please note that provisioning will be skipped if e2e.use-existing-cluster is provided.
bootstrapClusterProvider bootstrap.ClusterProvider

2 changes: 1 addition & 1 deletion test/extension/Dockerfile
@@ -45,7 +45,7 @@ RUN --mount=type=cache,target=/go/pkg/mod \
# Copy the sources
COPY ./ ./

-# Cache the go build into the the Go’s compiler cache folder so we take benefits of compiler caching across docker build calls
+# Cache the go build into the Go’s compiler cache folder so we take benefits of compiler caching across docker build calls
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
go build .
2 changes: 1 addition & 1 deletion test/framework/alltypes_helpers.go
@@ -76,7 +76,7 @@ func GetCAPIResources(ctx context.Context, input GetCAPIResourcesInput) []*unstr
return objList
}

-// getClusterAPITypes returns the list of TypeMeta to be considered for the the move discovery phase.
+// getClusterAPITypes returns the list of TypeMeta to be considered for the move discovery phase.
// This list includes all the types belonging to CAPI providers.
func getClusterAPITypes(ctx context.Context, lister Lister) []metav1.TypeMeta {
discoveredTypes := []metav1.TypeMeta{}
2 changes: 1 addition & 1 deletion test/framework/controlplane_helpers.go
@@ -193,7 +193,7 @@ func AssertControlPlaneFailureDomains(ctx context.Context, input AssertControlPl
Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling AssertControlPlaneFailureDomains")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling AssertControlPlaneFailureDomains")

By("Checking all the the control plane machines are in the expected failure domains")
By("Checking all the control plane machines are in the expected failure domains")
controlPlaneFailureDomains := sets.NewString()
for fd, fdSettings := range input.Cluster.Status.FailureDomains {
if fdSettings.ControlPlane {
2 changes: 1 addition & 1 deletion util/conditions/getter.go
@@ -234,7 +234,7 @@ func mirror(from Getter, targetCondition clusterv1.ConditionType, options ...Mir
return condition
}

-// Aggregates all the the Ready condition from a list of dependent objects into the target object;
+// Aggregates all the Ready condition from a list of dependent objects into the target object;
// if the Ready condition does not exists in one of the source object, the object is excluded from
// the aggregation; if none of the source object have ready condition, no target conditions is generated.
func aggregate(from []Getter, targetCondition clusterv1.ConditionType, options ...MergeOption) *clusterv1.Condition {
8 changes: 4 additions & 4 deletions util/conditions/merge.go
@@ -132,25 +132,25 @@ func (g conditionGroups) Swap(i, j int) {
g[i], g[j] = g[j], g[i]
}

-// TopGroup returns the the condition group with the highest mergePriority.
+// TopGroup returns the condition group with the highest mergePriority.
func (g conditionGroups) TopGroup() *conditionGroup {
if len(g) == 0 {
return nil
}
return &g[0]
}

-// TrueGroup returns the the condition group with status True, if any.
+// TrueGroup returns the condition group with status True, if any.
func (g conditionGroups) TrueGroup() *conditionGroup {
return g.getByStatusAndSeverity(corev1.ConditionTrue, clusterv1.ConditionSeverityNone)
}

-// ErrorGroup returns the the condition group with status False and severity Error, if any.
+// ErrorGroup returns the condition group with status False and severity Error, if any.
func (g conditionGroups) ErrorGroup() *conditionGroup {
return g.getByStatusAndSeverity(corev1.ConditionFalse, clusterv1.ConditionSeverityError)
}

-// WarningGroup returns the the condition group with status False and severity Warning, if any.
+// WarningGroup returns the condition group with status False and severity Warning, if any.
func (g conditionGroups) WarningGroup() *conditionGroup {
return g.getByStatusAndSeverity(corev1.ConditionFalse, clusterv1.ConditionSeverityWarning)
}
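TrueGroup, ErrorGroup, and WarningGroup above all funnel into getByStatusAndSeverity, whose body is not part of this diff. A self-contained sketch of that lookup with stand-in types (the real type is unexported, so its field names here are assumptions):

package sketch

import (
	corev1 "k8s.io/api/core/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// conditionGroup stands in for the unexported type: one group per
// (status, severity) pair, kept sorted by merge priority.
type conditionGroup struct {
	status   corev1.ConditionStatus
	severity clusterv1.ConditionSeverity
}

type conditionGroups []conditionGroup

// getByStatusAndSeverity returns the first group matching the pair; because the
// slice is sorted by merge priority, the first match is the highest-priority one.
func (g conditionGroups) getByStatusAndSeverity(status corev1.ConditionStatus, severity clusterv1.ConditionSeverity) *conditionGroup {
	for i := range g {
		if g[i].status == status && g[i].severity == severity {
			return &g[i]
		}
	}
	return nil
}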
4 changes: 2 additions & 2 deletions util/conditions/setter.go
@@ -127,13 +127,13 @@ func SetSummary(to Setter, options ...MergeOption) {
Set(to, summary(to, options...))
}

-// SetMirror creates a new condition by mirroring the the Ready condition from a dependent object;
+// SetMirror creates a new condition by mirroring the Ready condition from a dependent object;
// if the Ready condition does not exists in the source object, no target conditions is generated.
func SetMirror(to Setter, targetCondition clusterv1.ConditionType, from Getter, options ...MirrorOptions) {
Set(to, mirror(from, targetCondition, options...))
}

-// SetAggregate creates a new condition with the aggregation of all the the Ready condition
+// SetAggregate creates a new condition with the aggregation of all the Ready condition
// from a list of dependent objects; if the Ready condition does not exists in one of the source object,
// the object is excluded from the aggregation; if none of the source object have ready condition,
// no target conditions is generated.
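As a usage sketch for the two setters above (the condition types and objects are illustrative choices; in real CAPI controllers the sources are typically contract objects related to the target):

package sketch

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// mirrorAndAggregate copies a dependent object's Ready condition onto the cluster
// under a dedicated condition type, then summarizes Ready across several dependents.
func mirrorAndAggregate(cluster *clusterv1.Cluster, infra conditions.Getter, machines []conditions.Getter) {
	// Mirror: the cluster gains a condition tracking infra's Ready condition.
	conditions.SetMirror(cluster, clusterv1.InfrastructureReadyCondition, infra)

	// Aggregate: the cluster's Ready condition summarizes the machines' Ready
	// conditions; sources without a Ready condition are skipped, as documented above.
	conditions.SetAggregate(cluster, clusterv1.ReadyCondition, machines)
}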
