diff --git a/.github/workflows/canary-integration-test.yml b/.github/workflows/canary-integration-test.yml
index 0af3f2fd84ca..873e60a496bf 100644
--- a/.github/workflows/canary-integration-test.yml
+++ b/.github/workflows/canary-integration-test.yml
@@ -79,6 +79,22 @@ jobs:
           # print existing client auth
           kubectl -n rook-ceph exec $toolbox -- ceph auth ls

+      - name: test that re-running the external script results in the same output
+        run: |
+          toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
+          kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name=replicapool | tee output1.txt
+          kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name=replicapool | tee output2.txt
+          if cmp output1.txt output2.txt; then
+            echo "files have same output"
+            rm output1.txt
+            rm output2.txt
+          else
+            echo "re-run with the same flags changed the output, resulting in failure"
+            rm output1.txt
+            rm output2.txt
+            exit 1
+          fi
+
       - name: dry run external script create-external-cluster-resources.py
         run: |
           toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
@@ -157,7 +173,7 @@ jobs:
           kubectl -n rook-ceph exec $toolbox -- ceph auth ls
           # update the existing non-restricted client auth with the new ones
           kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --upgrade
-          # print ugraded client auth
+          # print upgraded client auth
           kubectl -n rook-ceph exec $toolbox -- ceph auth ls

       - name: test the upgrade flag for restricted auth user
@@ -168,7 +184,7 @@ jobs:
           # restricted auth user need to provide --rbd-data-pool-name,
           # --cluster-name and --run-as-user flag while upgrading
           kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --upgrade --rbd-data-pool-name replicapool --cluster-name rookstorage --run-as-user client.csi-rbd-node-rookstorage-replicapool
-          # print ugraded client auth
+          # print upgraded client auth
           kubectl -n rook-ceph exec $toolbox -- ceph auth get client.csi-rbd-node-rookstorage-replicapool

       - name: validate-rgw-endpoint
@@ -1277,7 +1293,7 @@ jobs:
           # snaps=$(kubectl -n rook-ceph exec deploy/rook-ceph-fs-mirror -- ceph --admin-daemon /var/run/ceph/$mirror_daemon fs mirror peer status myfs@1 $clusterfsid|jq -r '."/volumes/_nogroup/testsubvolume"."snaps_synced"')
           # echo "snapshots: $snaps"
           # if [ $num_snaps_target = $snaps ]
-          # then echo "Snaphots have synced."
+          # then echo "Snapshots have synced."
           # else echo "Snaps have not synced. NEEDS INVESTIGATION"
           # fi
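The new CI step above checks that the external script is idempotent by diffing the output of two identical runs. Below is a minimal standalone sketch of the same check in Python for running outside the workflow; the `deploy/rook-ceph-tools` exec target and the pool name are illustrative assumptions, not part of this change.

```python
import subprocess

# Command under test: the external-cluster script run through the toolbox.
# The deployment name and --rbd-data-pool-name value are illustrative assumptions.
CMD = [
    "kubectl", "-n", "rook-ceph", "exec", "deploy/rook-ceph-tools", "--",
    "python3", "/etc/ceph/create-external-cluster-resources.py",
    "--rbd-data-pool-name=replicapool",
]

def run_once() -> bytes:
    # check=True makes a failing script run fail the idempotency check as well
    return subprocess.run(CMD, check=True, capture_output=True).stdout

if run_once() != run_once():
    raise SystemExit("re-run with the same flags changed the output")
print("re-run produced identical output")
```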
diff --git a/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-multisite.md b/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-multisite.md
index 3a4e0990c42e..7304d91c1bcf 100644
--- a/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-multisite.md
+++ b/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-multisite.md
@@ -189,7 +189,7 @@ kubectl create -f object-multisite-pull-realm.yaml
 ## Scaling a Multisite

 Scaling the number of gateways that run the synchronization thread to 2 or more can increase the latency of the
-replication of each S3 object. The recommended way to scale a mutisite configuration is to dissociate the gateway dedicated
+replication of each S3 object. The recommended way to scale a multisite configuration is to dissociate the gateway dedicated
 to the synchronization from gateways that serve clients.

 The two types of gateways can be deployed by creating two CephObjectStores associated with the same CephObjectZone. The
diff --git a/Documentation/Storage-Configuration/ceph-teardown.md b/Documentation/Storage-Configuration/ceph-teardown.md
index 4b1224d9f96f..7e23df19bac1 100644
--- a/Documentation/Storage-Configuration/ceph-teardown.md
+++ b/Documentation/Storage-Configuration/ceph-teardown.md
@@ -116,7 +116,7 @@ partprobe $DISK
 ```

 Ceph can leave LVM and device mapper data that can lock the disks, preventing the disks from being
-used again. These steps can help to free up old Ceph disks for re-use. Note that this only needs to
+used again. These steps can help to free up old Ceph disks for reuse. Note that this only needs to
 be run once on each node.

 If you have **only one** Rook cluster and **all** Ceph disks are being wiped, run the following command.
diff --git a/cmd/rook/rook/rook.go b/cmd/rook/rook/rook.go
index 615bbcf8d81c..c8134db315ef 100644
--- a/cmd/rook/rook/rook.go
+++ b/cmd/rook/rook/rook.go
@@ -228,7 +228,7 @@ func GetInternalOrExternalClient() kubernetes.Interface {
 		for _, kConf := range strings.Split(kubeconfig, ":") {
 			restConfig, err = clientcmd.BuildConfigFromFlags("", kConf)
 			if err == nil {
-				logger.Debugf("attmepting to create kube clientset from kube config file %q", kConf)
+				logger.Debugf("attempting to create kube clientset from kube config file %q", kConf)
 				clientset, err = kubernetes.NewForConfig(restConfig)
 				if err == nil {
 					logger.Infof("created kube client interface from kube config file %q present in KUBECONFIG environment variable", kConf)
diff --git a/deploy/examples/create-external-cluster-resources.py b/deploy/examples/create-external-cluster-resources.py
index f63e3485b7df..34dc150a3161 100644
--- a/deploy/examples/create-external-cluster-resources.py
+++ b/deploy/examples/create-external-cluster-resources.py
@@ -58,9 +58,11 @@
 try:
     # for 2.7.x
     from urlparse import urlparse
+    from urllib import urlencode as urlencode
 except ModuleNotFoundError:
     # for 3.x
     from urllib.parse import urlparse
+    from urllib.parse import urlencode as urlencode

 try:
     from base64 import encodestring
@@ -555,7 +557,7 @@ def _check_conflicting_options(self):
             )

     def _invalid_endpoint(self, endpoint_str):
-        # seprating port, by getting last split of `:` delimiter
+        # separating port, by getting last split of `:` delimiter
         try:
             endpoint_str_ip, port = endpoint_str.rsplit(":", 1)
         except ValueError:
@@ -1160,7 +1162,7 @@ def create_checkerKey(self, user):
         # check if user already exist
         user_key = self.check_user_exist(entity)
         if user_key != "":
-            return user_key, f"{entity.split('.', 1)[1]}"
+            return user_key

         ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
         # if there is an unsuccessful attempt,
@@ -1363,7 +1365,7 @@ def get_rgw_fsid(self, base_url, verify):
         rgw_endpoint = self._arg_parser.rgw_endpoint
         base_url = base_url + "://" + rgw_endpoint + "/admin/info?"
         params = {"format": "json"}
-        request_url = base_url + urllib.parse.urlencode(params)
+        request_url = base_url + urlencode(params)

         try:
             r = requests.get(
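The script changes above import `urlencode` alongside `urlparse` so the RGW admin URL can be built the same way on both Python 2 and Python 3. A rough standalone sketch of that pattern follows; the helper name and the endpoint value are made up for illustration, and the real script builds this URL inside `get_rgw_fsid`.

```python
try:
    # Python 2.7.x
    from urllib import urlencode
except ImportError:
    # Python 3.x
    from urllib.parse import urlencode

def admin_info_url(rgw_endpoint, scheme="http"):
    # Keep the port attached to the host (e.g. "10.0.0.1:8080") and append the
    # query string via urlencode instead of hand-formatting it.
    params = {"format": "json"}
    return scheme + "://" + rgw_endpoint + "/admin/info?" + urlencode(params)

print(admin_info_url("10.0.0.1:8080"))  # http://10.0.0.1:8080/admin/info?format=json
```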
params = {"format": "json"} - request_url = base_url + urllib.parse.urlencode(params) + request_url = base_url + urlencode(params) try: r = requests.get( diff --git a/pkg/daemon/multus/validation.go b/pkg/daemon/multus/validation.go index 5ddf7d5204b7..5ada656bad21 100644 --- a/pkg/daemon/multus/validation.go +++ b/pkg/daemon/multus/validation.go @@ -113,7 +113,7 @@ func (s *getExpectedNumberOfImagePullPodsState) Run( } /* - * Re-usable state to verify that expected number of pods are "Running" but not necessarily "Ready" + * Reusable state to verify that expected number of pods are "Running" but not necessarily "Ready" * > Verify all image pull pods are running * -- next state --> Delete image pull daemonset * > Verify all client pods are running diff --git a/pkg/operator/ceph/object/admin.go b/pkg/operator/ceph/object/admin.go index 0c854f051934..9cc3b3612e7b 100644 --- a/pkg/operator/ceph/object/admin.go +++ b/pkg/operator/ceph/object/admin.go @@ -302,7 +302,7 @@ func CommitConfigChanges(c *Context) error { return errorOrIsNotFound(err, "failed to get the current RGW configuration period to see if it needs changed") } - // this stages the current config changees and returns what the new period config will look like + // this stages the current config changes and returns what the new period config will look like // without committing the changes stagedPeriod, err := runAdminCommand(c, true, "period", "update") if err != nil { diff --git a/tests/framework/clients/block.go b/tests/framework/clients/block.go index f079398320d7..9b56e14d14d1 100644 --- a/tests/framework/clients/block.go +++ b/tests/framework/clients/block.go @@ -49,7 +49,7 @@ func CreateBlockOperation(k8shelp *utils.K8sHelper, manifests installer.CephMani // BlockCreate Function to create a Block using Rook // Input parameters - // manifest - pod definition that creates a pvc in k8s - yaml should describe name and size of pvc being created -// size - not user for k8s implementation since its descried on the pvc yaml definition +// size - not user for k8s implementation since its described on the pvc yaml definition // Output - k8s create pvc operation output and/or error func (b *BlockOperation) Create(manifest string, size int) (string, error) { args := []string{"apply", "-f", "-"}