PlacementSpec is the placement for core ceph daemons part of the CephCluster CRD
+PlacementStorageClassSpec
+
+(Appears on: PoolPlacementSpec)
+
+| Field | Description |
+| ----- | ----------- |
+| `name` (string) | Name is the StorageClass name. Ceph allows an arbitrary name for StorageClasses, however most clients/libs insist on AWS names, so it is recommended to use one of the valid x-amz-storage-class values for better compatibility: REDUCED_REDUNDANCY \| STANDARD_IA \| ONEZONE_IA \| INTELLIGENT_TIERING \| GLACIER \| DEEP_ARCHIVE \| OUTPOSTS \| GLACIER_IR \| SNOW \| EXPRESS_ONEZONE. See AWS docs: https://aws.amazon.com/de/s3/storage-classes/ |
+| `dataPoolName` (string) | DataPoolName is the data pool used to store ObjectStore objects data. |
+
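+A `storageClasses` entry under a pool placement might look like the following minimal sketch
+(the pool name is illustrative, not a Rook default):
+
+```yaml
+storageClasses:
+  - name: REDUCED_REDUNDANCY              # an AWS x-amz-storage-class value, for client compatibility
+    dataPoolName: reduced-redundancy-pool # hypothetical CephBlockPool backing this class
+```
+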
PoolMirroringInfo
@@ -10780,6 +10847,85 @@ StatesSpec
+
PoolPlacementSpec
+
+(Appears on: ObjectSharedPoolsSpec)
+
+| Field | Description |
+| ----- | ----------- |
+| `name` (string) | Pool placement name. Name can be arbitrary. Placement with name "default" will be used as default. |
+| `metadataPoolName` (string) | The metadata pool used to store ObjectStore bucket index. |
+| `dataPoolName` (string) | The data pool used to store ObjectStore objects data. |
+| `dataNonECPoolName` (string) | (Optional) The data pool used to store ObjectStore data that cannot use erasure coding (ex: multi-part uploads). If dataPoolName is not erasure coded, then there is no need for dataNonECPoolName. |
+| `storageClasses` ([]PlacementStorageClassSpec) | (Optional) StorageClasses can be selected by user to override dataPoolName during object creation. Each placement has default STANDARD StorageClass pointing to dataPoolName. This list allows defining additional StorageClasses on top of default STANDARD storage class. |
+
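+A single `poolPlacements` entry in a CephObjectStore's `spec.sharedPools` could look like this
+minimal sketch (pool names are illustrative; `dataNonECPoolName` is only needed when `dataPoolName`
+is erasure coded):
+
+```yaml
+poolPlacements:
+  - name: default                    # used when no placement is requested at bucket creation
+    metadataPoolName: rgw-meta-pool  # hypothetical replicated pool for the bucket index
+    dataPoolName: rgw-data-ec-pool   # hypothetical erasure-coded pool for object data
+    dataNonECPoolName: rgw-meta-pool # replicated pool for data that cannot be erasure coded
+```
+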
PoolSpec
diff --git a/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md b/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md
index d81f55ebf2c2..dc5df80b8847 100644
--- a/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md
+++ b/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md
@@ -200,11 +200,18 @@ CSI-Addons supports the following operations:
* [Creating a ReclaimSpaceCronJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/reclaimspace.md#reclaimspacecronjob)
* [Annotating PersistentVolumeClaims](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/reclaimspace.md#annotating-perstentvolumeclaims)
* [Annotating Namespace](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/reclaimspace.md#annotating-namespace)
+ * [Annotating StorageClass](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/reclaimspace.md#annotating-storageclass)
* Network Fencing
* [Creating a NetworkFence](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/networkfence.md)
* Volume Replication
* [Creating VolumeReplicationClass](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/volumereplicationclass.md)
* [Creating VolumeReplication CR](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/volumereplication.md)
+* Key Rotation Job for PV encryption
+ * [Creating EncryptionKeyRotationJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/encryptionkeyrotation.md#encryptionkeyrotationjob)
+ * [Creating EncryptionKeyRotationCronJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/encryptionkeyrotation.md#encryptionkeyrotationcronjob)
+ * [Annotating PersistentVolumeClaims](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/encryptionkeyrotation.md#annotating-persistentvolumeclaims)
+ * [Annotating Namespace](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/encryptionkeyrotation.md#annotating-namespace)
+ * [Annotating StorageClass](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/encryptionkeyrotation.md#annotating-storageclass)
## Enable RBD and CephFS Encryption Support
diff --git a/Documentation/Storage-Configuration/Object-Storage-RGW/object-storage.md b/Documentation/Storage-Configuration/Object-Storage-RGW/object-storage.md
index 50bfcd176a20..4c291a0c2409 100644
--- a/Documentation/Storage-Configuration/Object-Storage-RGW/object-storage.md
+++ b/Documentation/Storage-Configuration/Object-Storage-RGW/object-storage.md
@@ -14,8 +14,9 @@ Rook can configure the Ceph Object Store for several different scenarios. See ea
1. Create a [local object store](#create-a-local-object-store-with-s3) with dedicated Ceph pools. This option is recommended if a single object store is required, and is the simplest to get started.
2. Create [one or more object stores with shared Ceph pools](#create-local-object-stores-with-shared-pools). This option is recommended when multiple object stores are required.
-3. Connect to an [RGW service in an external Ceph cluster](#connect-to-an-external-object-store), rather than create a local object store.
-4. Configure [RGW Multisite](#object-multisite) to synchronize buckets between object stores in different clusters.
+3. Create [one or more object stores with pool placement targets and storage classes](#create-local-object-stores-with-pool-placements). This configuration allows Rook to provide different object placement options to object store clients.
+4. Connect to an [RGW service in an external Ceph cluster](#connect-to-an-external-object-store), rather than create a local object store.
+5. Configure [RGW Multisite](#object-multisite) to synchronize buckets between object stores in different clusters.
!!! note
Updating the configuration of an object store between these types is not supported.
@@ -188,6 +189,83 @@ To consume the object store, continue below in the section to [Create a bucket](
Modify the default example object store name from `my-store` to the alternate name of the object store
such as `store-a` in this example.
+### Create Local Object Store(s) with Pool Placements
+
+!!! attention
+ This feature is experimental.
+
+This section describes how to configure [RGW's pool placement and storage classes](https://docs.ceph.com/en/reef/radosgw/placement/) with Rook.
+
+The Object Storage API allows users to override where bucket data will be stored at bucket creation time, via the `LocationConstraint` parameter in the S3 API or the `X-Storage-Policy` header in Swift. Similarly, users can override where object data will be stored by setting the `X-Amz-Storage-Class` (S3) or `X-Object-Storage-Class` (Swift) header during object creation.
+
+To enable this feature, configure `poolPlacements` representing a list of possible bucket data locations.
+Each `poolPlacement` must have:
+
+* a **unique** `name` to refer to it in `LocationConstraint` or `X-Storage-Policy`. A placement with the reserved name `default` will be used if no location constraint is provided.
+* `dataPoolName` and `metadataPoolName` representing object data and metadata locations. In Rook, these data locations are backed by `CephBlockPool`. `poolPlacements` and `storageClasses` specs refer to pools by name, so all pools should be defined in advance. Similar to [sharedPools](#create-local-object-stores-with-shared-pools), the same pool can be reused across multiple ObjectStores and/or poolPlacements/storageClasses because of RADOS namespaces. Here, each pool will be namespaced with an `<object store name>.<placement name>.<storage class name>` key.
+* **optional** `dataNonECPoolName` - extra pool for data that cannot use erasure coding (ex: multi-part uploads). If not set, `metadataPoolName` will be used.
+* **optional** list of placement `storageClasses`. Classes are defined per placement, which means that even classes of the `default` placement are available only within that placement and not in others. Each placement automatically has a default storage class named `STANDARD`. The `STANDARD` class always points to the placement's `dataPoolName` and cannot be removed or redefined. Each storage class must have:
+    * `name` (unique within the placement). RGW allows an arbitrary name for StorageClasses; however, some clients/libs insist on AWS names, so it is recommended to use one of the valid `x-amz-storage-class` values for better compatibility: `STANDARD | REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE`. See [AWS docs](https://aws.amazon.com/s3/storage-classes/).
+    * `dataPoolName` - overrides the placement data pool when this class is selected by the user.
+
+Example: Configure `CephObjectStore` with a `default` placement pointing to `us` pools and a `europe` placement pointing to pools in the corresponding geography. These geographical locations are only an example. The placement name can be arbitrary and could reflect the backing pool's replication factor, device class, or failure domain. This example also defines a `REDUCED_REDUNDANCY` storage class for each placement.
+
+```yaml
+apiVersion: ceph.rook.io/v1
+kind: CephObjectStore
+metadata:
+ name: my-store
+ namespace: rook-ceph
+spec:
+ gateway:
+ port: 80
+ instances: 1
+ sharedPools:
+ poolPlacements:
+ - name: default
+      metadataPoolName: "us-meta-pool"
+      dataPoolName: "us-data-pool"
+ storageClasses:
+ - name: REDUCED_REDUNDANCY
+ dataPoolName: "us-reduced-pool"
+ - name: europe
+ metadataPoolName: "eu-meta-pool"
+ dataPoolName: "eu-data-pool"
+ storageClasses:
+ - name: REDUCED_REDUNDANCY
+ dataPoolName: "eu-reduced-pool"
+```
+
+S3 clients can direct objects into the pools defined above. The example below uses the [s5cmd](https://github.com/peak/s5cmd) CLI tool, which is pre-installed in the toolbox pod:
+
+```shell
+# make bucket without location constraint -> will use "us"
+s5cmd mb s3://bucket1
+
+# put object to bucket1 without storage class -> end up in "us-data-pool"
+s5cmd put obj s3://bucket1/obj
+
+# put object to bucket1 with "STANDARD" storage class -> end up in "us-data-pool"
+s5cmd put obj s3://bucket1/obj --storage-class=STANDARD
+
+# put object to bucket1 with "REDUCED_REDUNDANCY" storage class -> end up in "us-reduced-pool"
+s5cmd put obj s3://bucket1/obj --storage-class=REDUCED_REDUNDANCY
+
+
+# make bucket with location constraint europe
+s5cmd mb s3://bucket2 --region=my-store:europe
+
+# put object to bucket2 without storage class -> end up in "eu-data-pool"
+s5cmd put obj s3://bucket2/obj
+
+# put object to bucket2 with "STANDARD" storage class -> end up in "eu-data-pool"
+s5cmd put obj s3://bucket2/obj --storage-class=STANDARD
+
+# put object to bucket2 with "REDUCED_REDUNDANCY" storage class -> end up in "eu-reduced-pool"
+s5cmd put obj s3://bucket2/obj --storage-class=REDUCED_REDUNDANCY
+
+```
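+
+To verify that the placements and storage classes were applied, the resulting RGW configuration can
+be inspected from the toolbox pod. This is a sketch assuming the default single-site setup, where
+Rook names the zone group and zone after the object store (`my-store` here):
+
+```shell
+# list placement targets (e.g. "default" and "europe") and their storage classes
+radosgw-admin zonegroup get --rgw-zonegroup=my-store
+
+# show which pools and RADOS namespaces back each placement and storage class
+radosgw-admin zone get --rgw-zone=my-store
+```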
+
### Connect to an External Object Store
Rook can connect to existing RGW gateways to work in conjunction with the external mode of the `CephCluster` CRD. First, create a `rgw-admin-ops-user` user in the Ceph cluster with the necessary caps:
diff --git a/Makefile b/Makefile
index 0eb1ff7e5f44..e0ffb3fdabc7 100644
--- a/Makefile
+++ b/Makefile
@@ -215,7 +215,7 @@ helm-docs: $(HELM_DOCS) ## Use helm-docs to generate documentation from helm cha
-t ../../../Documentation/Helm-Charts/ceph-cluster-chart.gotmpl.md \
-t ../../../Documentation/Helm-Charts/_templates.gotmpl
-check-helm-docs:
+check.helm-docs:
@$(MAKE) helm-docs
@git diff --exit-code || { \
echo "Please run 'make helm-docs' locally, commit the updated docs, and push the change. See https://rook.io/docs/rook/latest/Contributing/documentation/#making-docs" ; \
diff --git a/build/csv/ceph/ceph.rook.io_cephobjectstores.yaml b/build/csv/ceph/ceph.rook.io_cephobjectstores.yaml
index cafd46dba004..3f86d6ffce5d 100644
--- a/build/csv/ceph/ceph.rook.io_cephobjectstores.yaml
+++ b/build/csv/ceph/ceph.rook.io_cephobjectstores.yaml
@@ -1476,11 +1476,44 @@ spec:
x-kubernetes-validations:
- message: object store shared metadata pool is immutable
rule: self == oldSelf
+ poolPlacements:
+ items:
+ properties:
+ dataNonECPoolName:
+ type: string
+ dataPoolName:
+ minLength: 1
+ type: string
+ metadataPoolName:
+ minLength: 1
+ type: string
+ name:
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ storageClasses:
+ items:
+ properties:
+ dataPoolName:
+ minLength: 1
+ type: string
+ name:
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ required:
+ - dataPoolName
+ - name
+ type: object
+ type: array
+ required:
+ - dataPoolName
+ - metadataPoolName
+ - name
+ type: object
+ type: array
preserveRadosNamespaceDataOnDelete:
type: boolean
- required:
- - dataPoolName
- - metadataPoolName
type: object
zone:
nullable: true
diff --git a/build/csv/ceph/ceph.rook.io_cephobjectzones.yaml b/build/csv/ceph/ceph.rook.io_cephobjectzones.yaml
index f5946e3142e8..deb02663feb5 100644
--- a/build/csv/ceph/ceph.rook.io_cephobjectzones.yaml
+++ b/build/csv/ceph/ceph.rook.io_cephobjectzones.yaml
@@ -314,11 +314,44 @@ spec:
x-kubernetes-validations:
- message: object store shared metadata pool is immutable
rule: self == oldSelf
+ poolPlacements:
+ items:
+ properties:
+ dataNonECPoolName:
+ type: string
+ dataPoolName:
+ minLength: 1
+ type: string
+ metadataPoolName:
+ minLength: 1
+ type: string
+ name:
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ storageClasses:
+ items:
+ properties:
+ dataPoolName:
+ minLength: 1
+ type: string
+ name:
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ required:
+ - dataPoolName
+ - name
+ type: object
+ type: array
+ required:
+ - dataPoolName
+ - metadataPoolName
+ - name
+ type: object
+ type: array
preserveRadosNamespaceDataOnDelete:
type: boolean
- required:
- - dataPoolName
- - metadataPoolName
type: object
zoneGroup:
type: string
diff --git a/deploy/charts/rook-ceph/templates/resources.yaml b/deploy/charts/rook-ceph/templates/resources.yaml
index e7954efe7b9e..767e08498461 100644
--- a/deploy/charts/rook-ceph/templates/resources.yaml
+++ b/deploy/charts/rook-ceph/templates/resources.yaml
@@ -12316,12 +12316,72 @@ spec:
x-kubernetes-validations:
- message: object store shared metadata pool is immutable
rule: self == oldSelf
+ poolPlacements:
+ description: |-
+ PoolPlacements control which Pools are associated with a particular RGW bucket.
+ Once PoolPlacements are defined, RGW client will be able to associate pool
+ with ObjectStore bucket by providing "" during s3 bucket creation
+ or "X-Storage-Policy" header during swift container creation.
+ See: https://docs.ceph.com/en/latest/radosgw/placement/#placement-targets
+ PoolPlacement with name: "default" will be used as a default pool if no option
+ is provided during bucket creation.
+ If default placement is not provided, spec.sharedPools.dataPoolName and spec.sharedPools.MetadataPoolName will be used as default pools.
+ If spec.sharedPools are also empty, then RGW pools (spec.dataPool and spec.metadataPool) will be used as defaults.
+ items:
+ properties:
+ dataNonECPoolName:
+ description: |-
+ The data pool used to store ObjectStore data that cannot use erasure coding (ex: multi-part uploads).
+ If dataPoolName is not erasure coded, then there is no need for dataNonECPoolName.
+ type: string
+ dataPoolName:
+ description: The data pool used to store ObjectStore objects data.
+ minLength: 1
+ type: string
+ metadataPoolName:
+ description: The metadata pool used to store ObjectStore bucket index.
+ minLength: 1
+ type: string
+ name:
+ description: Pool placement name. Name can be arbitrary. Placement with name "default" will be used as default.
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ storageClasses:
+ description: |-
+ StorageClasses can be selected by user to override dataPoolName during object creation.
+ Each placement has default STANDARD StorageClass pointing to dataPoolName.
+ This list allows defining additional StorageClasses on top of default STANDARD storage class.
+ items:
+ properties:
+ dataPoolName:
+ description: DataPoolName is the data pool used to store ObjectStore objects data.
+ minLength: 1
+ type: string
+ name:
+ description: |-
+ Name is the StorageClass name. Ceph allows arbitrary name for StorageClasses,
+ however most clients/libs insist on AWS names so it is recommended to use
+ one of the valid x-amz-storage-class values for better compatibility:
+ REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE
+ See AWS docs: https://aws.amazon.com/de/s3/storage-classes/
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ required:
+ - dataPoolName
+ - name
+ type: object
+ type: array
+ required:
+ - dataPoolName
+ - metadataPoolName
+ - name
+ type: object
+ type: array
preserveRadosNamespaceDataOnDelete:
description: Whether the RADOS namespaces should be preserved on deletion of the object store
type: boolean
- required:
- - dataPoolName
- - metadataPoolName
type: object
zone:
description: The multisite info
@@ -13178,12 +13238,72 @@ spec:
x-kubernetes-validations:
- message: object store shared metadata pool is immutable
rule: self == oldSelf
+ poolPlacements:
+ description: |-
+ PoolPlacements control which Pools are associated with a particular RGW bucket.
+ Once PoolPlacements are defined, RGW client will be able to associate pool
+ with ObjectStore bucket by providing "" during s3 bucket creation
+ or "X-Storage-Policy" header during swift container creation.
+ See: https://docs.ceph.com/en/latest/radosgw/placement/#placement-targets
+ PoolPlacement with name: "default" will be used as a default pool if no option
+ is provided during bucket creation.
+ If default placement is not provided, spec.sharedPools.dataPoolName and spec.sharedPools.MetadataPoolName will be used as default pools.
+ If spec.sharedPools are also empty, then RGW pools (spec.dataPool and spec.metadataPool) will be used as defaults.
+ items:
+ properties:
+ dataNonECPoolName:
+ description: |-
+ The data pool used to store ObjectStore data that cannot use erasure coding (ex: multi-part uploads).
+ If dataPoolName is not erasure coded, then there is no need for dataNonECPoolName.
+ type: string
+ dataPoolName:
+ description: The data pool used to store ObjectStore objects data.
+ minLength: 1
+ type: string
+ metadataPoolName:
+ description: The metadata pool used to store ObjectStore bucket index.
+ minLength: 1
+ type: string
+ name:
+ description: Pool placement name. Name can be arbitrary. Placement with name "default" will be used as default.
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ storageClasses:
+ description: |-
+ StorageClasses can be selected by user to override dataPoolName during object creation.
+ Each placement has default STANDARD StorageClass pointing to dataPoolName.
+ This list allows defining additional StorageClasses on top of default STANDARD storage class.
+ items:
+ properties:
+ dataPoolName:
+ description: DataPoolName is the data pool used to store ObjectStore objects data.
+ minLength: 1
+ type: string
+ name:
+ description: |-
+ Name is the StorageClass name. Ceph allows arbitrary name for StorageClasses,
+ however most clients/libs insist on AWS names so it is recommended to use
+ one of the valid x-amz-storage-class values for better compatibility:
+ REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE
+ See AWS docs: https://aws.amazon.com/de/s3/storage-classes/
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ required:
+ - dataPoolName
+ - name
+ type: object
+ type: array
+ required:
+ - dataPoolName
+ - metadataPoolName
+ - name
+ type: object
+ type: array
preserveRadosNamespaceDataOnDelete:
description: Whether the RADOS namespaces should be preserved on deletion of the object store
type: boolean
- required:
- - dataPoolName
- - metadataPoolName
type: object
zoneGroup:
description: The display name for the ceph users
diff --git a/deploy/examples/crds.yaml b/deploy/examples/crds.yaml
index 5978262293d4..9f7a3d225655 100644
--- a/deploy/examples/crds.yaml
+++ b/deploy/examples/crds.yaml
@@ -12307,12 +12307,72 @@ spec:
x-kubernetes-validations:
- message: object store shared metadata pool is immutable
rule: self == oldSelf
+ poolPlacements:
+ description: |-
+ PoolPlacements control which Pools are associated with a particular RGW bucket.
+ Once PoolPlacements are defined, RGW client will be able to associate pool
+ with ObjectStore bucket by providing "" during s3 bucket creation
+ or "X-Storage-Policy" header during swift container creation.
+ See: https://docs.ceph.com/en/latest/radosgw/placement/#placement-targets
+ PoolPlacement with name: "default" will be used as a default pool if no option
+ is provided during bucket creation.
+ If default placement is not provided, spec.sharedPools.dataPoolName and spec.sharedPools.MetadataPoolName will be used as default pools.
+ If spec.sharedPools are also empty, then RGW pools (spec.dataPool and spec.metadataPool) will be used as defaults.
+ items:
+ properties:
+ dataNonECPoolName:
+ description: |-
+ The data pool used to store ObjectStore data that cannot use erasure coding (ex: multi-part uploads).
+ If dataPoolName is not erasure coded, then there is no need for dataNonECPoolName.
+ type: string
+ dataPoolName:
+ description: The data pool used to store ObjectStore objects data.
+ minLength: 1
+ type: string
+ metadataPoolName:
+ description: The metadata pool used to store ObjectStore bucket index.
+ minLength: 1
+ type: string
+ name:
+ description: Pool placement name. Name can be arbitrary. Placement with name "default" will be used as default.
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ storageClasses:
+ description: |-
+ StorageClasses can be selected by user to override dataPoolName during object creation.
+ Each placement has default STANDARD StorageClass pointing to dataPoolName.
+ This list allows defining additional StorageClasses on top of default STANDARD storage class.
+ items:
+ properties:
+ dataPoolName:
+ description: DataPoolName is the data pool used to store ObjectStore objects data.
+ minLength: 1
+ type: string
+ name:
+ description: |-
+ Name is the StorageClass name. Ceph allows arbitrary name for StorageClasses,
+ however most clients/libs insist on AWS names so it is recommended to use
+ one of the valid x-amz-storage-class values for better compatibility:
+ REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE
+ See AWS docs: https://aws.amazon.com/de/s3/storage-classes/
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ required:
+ - dataPoolName
+ - name
+ type: object
+ type: array
+ required:
+ - dataPoolName
+ - metadataPoolName
+ - name
+ type: object
+ type: array
preserveRadosNamespaceDataOnDelete:
description: Whether the RADOS namespaces should be preserved on deletion of the object store
type: boolean
- required:
- - dataPoolName
- - metadataPoolName
type: object
zone:
description: The multisite info
@@ -13166,12 +13226,72 @@ spec:
x-kubernetes-validations:
- message: object store shared metadata pool is immutable
rule: self == oldSelf
+ poolPlacements:
+ description: |-
+ PoolPlacements control which Pools are associated with a particular RGW bucket.
+ Once PoolPlacements are defined, RGW client will be able to associate pool
+ with ObjectStore bucket by providing "" during s3 bucket creation
+ or "X-Storage-Policy" header during swift container creation.
+ See: https://docs.ceph.com/en/latest/radosgw/placement/#placement-targets
+ PoolPlacement with name: "default" will be used as a default pool if no option
+ is provided during bucket creation.
+ If default placement is not provided, spec.sharedPools.dataPoolName and spec.sharedPools.MetadataPoolName will be used as default pools.
+ If spec.sharedPools are also empty, then RGW pools (spec.dataPool and spec.metadataPool) will be used as defaults.
+ items:
+ properties:
+ dataNonECPoolName:
+ description: |-
+ The data pool used to store ObjectStore data that cannot use erasure coding (ex: multi-part uploads).
+ If dataPoolName is not erasure coded, then there is no need for dataNonECPoolName.
+ type: string
+ dataPoolName:
+ description: The data pool used to store ObjectStore objects data.
+ minLength: 1
+ type: string
+ metadataPoolName:
+ description: The metadata pool used to store ObjectStore bucket index.
+ minLength: 1
+ type: string
+ name:
+ description: Pool placement name. Name can be arbitrary. Placement with name "default" will be used as default.
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ storageClasses:
+ description: |-
+ StorageClasses can be selected by user to override dataPoolName during object creation.
+ Each placement has default STANDARD StorageClass pointing to dataPoolName.
+ This list allows defining additional StorageClasses on top of default STANDARD storage class.
+ items:
+ properties:
+ dataPoolName:
+ description: DataPoolName is the data pool used to store ObjectStore objects data.
+ minLength: 1
+ type: string
+ name:
+ description: |-
+ Name is the StorageClass name. Ceph allows arbitrary name for StorageClasses,
+ however most clients/libs insist on AWS names so it is recommended to use
+ one of the valid x-amz-storage-class values for better compatibility:
+ REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE
+ See AWS docs: https://aws.amazon.com/de/s3/storage-classes/
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ required:
+ - dataPoolName
+ - name
+ type: object
+ type: array
+ required:
+ - dataPoolName
+ - metadataPoolName
+ - name
+ type: object
+ type: array
preserveRadosNamespaceDataOnDelete:
description: Whether the RADOS namespaces should be preserved on deletion of the object store
type: boolean
- required:
- - dataPoolName
- - metadataPoolName
type: object
zoneGroup:
description: The display name for the ceph users
diff --git a/go.mod b/go.mod
index 076aa298e388..789a8464d54b 100644
--- a/go.mod
+++ b/go.mod
@@ -31,8 +31,8 @@ require (
github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20221122204822-d1a8c34382f1
github.com/libopenstorage/secrets v0.0.0-20240416031220-a17cf7f72c6c
github.com/pkg/errors v0.9.1
- github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.0
- github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.0
+ github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.1
+ github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.1
github.com/rook/rook/pkg/apis v0.0.0-20231204200402-5287527732f7
github.com/sethvargo/go-password v0.3.1
github.com/spf13/cobra v1.8.1
diff --git a/go.sum b/go.sum
index 1faf19988f81..ccec62e8dcf6 100644
--- a/go.sum
+++ b/go.sum
@@ -778,11 +778,11 @@ github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.44.1/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.46.0/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.0 h1:tRwEFYFg+To2TGnibGl8dHBCh8Z/BVNKnXj2O5Za/2M=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.0/go.mod h1:Rd8YnCqz+2FYsiGmE2DMlaLjQRB4v2jFNnzCt9YY4IM=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.1 h1:QU2cs0xxKYvF1JfibP/8vs+pFy6OvIpqNR2lYC4jYNU=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.1/go.mod h1:Rd8YnCqz+2FYsiGmE2DMlaLjQRB4v2jFNnzCt9YY4IM=
github.com/prometheus-operator/prometheus-operator/pkg/client v0.46.0/go.mod h1:k4BrWlVQQsvBiTcDnKEMgyh/euRxyxgrHdur/ZX/sdA=
-github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.0 h1:bJhRd6R4kaYBZpH7cBrzbJpEKJjHx8cbVW1n3dxYnag=
-github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.0/go.mod h1:Nu6G9XLApnqXqunMwMYulcHlaxRwoveH4p4WnZsBHD8=
+github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.1 h1:wMPmeRdflJFu14F0YaIiOIYGkBDDKipkeWW0q53d2+s=
+github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.1/go.mod h1:7vND+IkdMpZyfSyRs6P5/uXz6BlFDaOj8olErODi8I0=
github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
diff --git a/pkg/apis/ceph.rook.io/v1/types.go b/pkg/apis/ceph.rook.io/v1/types.go
index 380c1f3cd23d..c817ac41ac73 100755
--- a/pkg/apis/ceph.rook.io/v1/types.go
+++ b/pkg/apis/ceph.rook.io/v1/types.go
@@ -1517,15 +1517,75 @@ type ObjectStoreSpec struct {
type ObjectSharedPoolsSpec struct {
// The metadata pool used for creating RADOS namespaces in the object store
// +kubebuilder:validation:XValidation:message="object store shared metadata pool is immutable",rule="self == oldSelf"
- MetadataPoolName string `json:"metadataPoolName"`
+ // +optional
+ MetadataPoolName string `json:"metadataPoolName,omitempty"`
// The data pool used for creating RADOS namespaces in the object store
// +kubebuilder:validation:XValidation:message="object store shared data pool is immutable",rule="self == oldSelf"
- DataPoolName string `json:"dataPoolName"`
+ // +optional
+ DataPoolName string `json:"dataPoolName,omitempty"`
// Whether the RADOS namespaces should be preserved on deletion of the object store
// +optional
PreserveRadosNamespaceDataOnDelete bool `json:"preserveRadosNamespaceDataOnDelete"`
+
+ // PoolPlacements control which Pools are associated with a particular RGW bucket.
+ // Once PoolPlacements are defined, RGW client will be able to associate pool
+ // with ObjectStore bucket by providing "" during s3 bucket creation
+ // or "X-Storage-Policy" header during swift container creation.
+ // See: https://docs.ceph.com/en/latest/radosgw/placement/#placement-targets
+ // PoolPlacement with name: "default" will be used as a default pool if no option
+ // is provided during bucket creation.
+ // If default placement is not provided, spec.sharedPools.dataPoolName and spec.sharedPools.MetadataPoolName will be used as default pools.
+ // If spec.sharedPools are also empty, then RGW pools (spec.dataPool and spec.metadataPool) will be used as defaults.
+ // +optional
+ PoolPlacements []PoolPlacementSpec `json:"poolPlacements,omitempty"`
+}
+
+type PoolPlacementSpec struct {
+ // Pool placement name. Name can be arbitrary. Placement with name "default" will be used as default.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9._/-]+$`
+ Name string `json:"name"`
+
+ // The metadata pool used to store ObjectStore bucket index.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ MetadataPoolName string `json:"metadataPoolName"`
+
+ // The data pool used to store ObjectStore objects data.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ DataPoolName string `json:"dataPoolName"`
+
+ // The data pool used to store ObjectStore data that cannot use erasure coding (ex: multi-part uploads).
+ // If dataPoolName is not erasure coded, then there is no need for dataNonECPoolName.
+ // +optional
+ DataNonECPoolName string `json:"dataNonECPoolName,omitempty"`
+
+ // StorageClasses can be selected by user to override dataPoolName during object creation.
+ // Each placement has default STANDARD StorageClass pointing to dataPoolName.
+ // This list allows defining additional StorageClasses on top of default STANDARD storage class.
+ // +optional
+ StorageClasses []PlacementStorageClassSpec `json:"storageClasses,omitempty"`
+}
+
+type PlacementStorageClassSpec struct {
+ // Name is the StorageClass name. Ceph allows arbitrary name for StorageClasses,
+ // however most clients/libs insist on AWS names so it is recommended to use
+ // one of the valid x-amz-storage-class values for better compatibility:
+ // REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE
+ // See AWS docs: https://aws.amazon.com/de/s3/storage-classes/
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9._/-]+$`
+ Name string `json:"name"`
+
+ // DataPoolName is the data pool used to store ObjectStore objects data.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ DataPoolName string `json:"dataPoolName"`
}
// ObjectHealthCheckSpec represents the health check of an object store
@@ -2019,7 +2079,7 @@ type CephObjectZoneGroupList struct {
// ObjectZoneGroupSpec represent the spec of an ObjectZoneGroup
type ObjectZoneGroupSpec struct {
- //The display name for the ceph users
+ // The display name for the ceph users
Realm string `json:"realm"`
}
@@ -2050,7 +2110,7 @@ type CephObjectZoneList struct {
// ObjectZoneSpec represent the spec of an ObjectZone
type ObjectZoneSpec struct {
- //The display name for the ceph users
+ // The display name for the ceph users
ZoneGroup string `json:"zoneGroup"`
// The metadata pool settings
diff --git a/pkg/daemon/ceph/client/crush.go b/pkg/daemon/ceph/client/crush.go
index 2f1c4d4631cd..75bb6fc01caf 100644
--- a/pkg/daemon/ceph/client/crush.go
+++ b/pkg/daemon/ceph/client/crush.go
@@ -75,7 +75,7 @@ type ruleSpec struct {
type stepSpec struct {
Operation string `json:"op"`
- Number uint `json:"num"`
+ Number int `json:"num"`
Item int `json:"item"`
ItemName string `json:"item_name"`
Type string `json:"type"`
diff --git a/pkg/daemon/ceph/client/crush_rule.go b/pkg/daemon/ceph/client/crush_rule.go
index b79d5470ac5c..97575b2a8164 100644
--- a/pkg/daemon/ceph/client/crush_rule.go
+++ b/pkg/daemon/ceph/client/crush_rule.go
@@ -150,7 +150,7 @@ func buildTwoStepCrushSteps(pool cephv1.PoolSpec) []stepSpec {
// Step three
stepTakeSubFailureDomain := &stepSpec{
Operation: "chooseleaf_firstn",
- Number: pool.Replicated.ReplicasPerFailureDomain,
+ Number: int(pool.Replicated.ReplicasPerFailureDomain),
Type: pool.Replicated.SubFailureDomain,
}
steps = append(steps, *stepTakeSubFailureDomain)
diff --git a/pkg/daemon/ceph/client/crush_rule_test.go b/pkg/daemon/ceph/client/crush_rule_test.go
index ce799bfdf138..c48e52b7355d 100644
--- a/pkg/daemon/ceph/client/crush_rule_test.go
+++ b/pkg/daemon/ceph/client/crush_rule_test.go
@@ -56,7 +56,7 @@ func TestBuildCrushSteps(t *testing.T) {
assert.Equal(t, 4, len(steps))
assert.Equal(t, cephv1.DefaultCRUSHRoot, steps[0].ItemName)
assert.Equal(t, "datacenter", steps[1].Type)
- assert.Equal(t, uint(2), steps[2].Number)
+ assert.Equal(t, 2, steps[2].Number)
}
func TestCompileCRUSHMap(t *testing.T) {
diff --git a/pkg/operator/ceph/csi/controller.go b/pkg/operator/ceph/csi/controller.go
index f5341a48325f..b93fe7a09bb8 100644
--- a/pkg/operator/ceph/csi/controller.go
+++ b/pkg/operator/ceph/csi/controller.go
@@ -150,7 +150,6 @@ var reconcileSaveCSIDriverOptions = SaveCSIDriverOptions
func (r *ReconcileCSI) reconcile(request reconcile.Request) (reconcile.Result, error) {
// reconcileResult is used to communicate the result of the reconciliation back to the caller
var reconcileResult reconcile.Result
- var clusterNamespace string
ownerRef, err := k8sutil.GetDeploymentOwnerReference(r.opManagerContext, r.context.Clientset, os.Getenv(k8sutil.PodNameEnvVar), r.opConfig.OperatorNamespace)
if err != nil {
@@ -295,7 +294,6 @@ func (r *ReconcileCSI) reconcile(request reconcile.Request) (reconcile.Result, e
return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to load cluster info for cluster %q", cluster.Name)
}
clusterInfo.OwnerInfo = k8sutil.NewOwnerInfo(&cephClusters.Items[i], r.scheme)
- clusterNamespace = clusterInfo.Namespace
// is holder enabled for this cluster?
thisHolderEnabled := (!csiHostNetworkEnabled || cluster.Spec.Network.IsMultus()) && !csiDisableHolders
@@ -331,13 +329,10 @@ func (r *ReconcileCSI) reconcile(request reconcile.Request) (reconcile.Result, e
return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to reconcile csi-op config CR")
}
return reconcileResult, nil
- } else {
- r.deleteCSIOperatorResources(clusterNamespace, false)
}
}
if !EnableCSIOperator() {
- r.deleteCSIOperatorResources(clusterNamespace, true)
err = r.validateAndConfigureDrivers(serverVersion, ownerInfo)
if err != nil {
diff --git a/pkg/operator/ceph/csi/operator_config.go b/pkg/operator/ceph/csi/operator_config.go
index 82f27c19a9ba..d1aceecfb698 100644
--- a/pkg/operator/ceph/csi/operator_config.go
+++ b/pkg/operator/ceph/csi/operator_config.go
@@ -176,20 +176,3 @@ func (r *ReconcileCSI) createImageSetConfigmap() (string, error) {
return cm.Name, nil
}
-
-func (r *ReconcileCSI) deleteImageSetConfigMap() error {
- cm := &v1.ConfigMap{}
- err := r.client.Get(r.opManagerContext, types.NamespacedName{Name: cm.Name, Namespace: r.opConfig.OperatorNamespace}, cm)
- if err != nil {
- if kerrors.IsNotFound(err) {
- return nil
- }
- }
- err = r.client.Delete(r.opManagerContext, cm)
- if nil != err {
- return errors.Wrapf(err, "failed to delete imageSet configMap %v", cm.Name)
- }
- logger.Infof("deleted imageSet configMap %q", cm.Name)
-
- return nil
-}
diff --git a/pkg/operator/ceph/csi/spec.go b/pkg/operator/ceph/csi/spec.go
index b3033f15cdeb..ab029bce5ec4 100644
--- a/pkg/operator/ceph/csi/spec.go
+++ b/pkg/operator/ceph/csi/spec.go
@@ -42,7 +42,6 @@ import (
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/kubernetes"
- csiopv1a1 "github.com/ceph/ceph-csi-operator/api/v1alpha1"
cephcsi "github.com/ceph/ceph-csi/api/deploy/kubernetes"
)
@@ -762,48 +761,6 @@ func (r *ReconcileCSI) stopDrivers(ver *version.Info) error {
return nil
}
-func (r *ReconcileCSI) deleteCSIOperatorResources(clusterNamespace string, deleteOp bool) {
- csiCephConnection := &csiopv1a1.CephConnection{}
-
- err := r.client.DeleteAllOf(r.opManagerContext, csiCephConnection, &client.DeleteAllOfOptions{ListOptions: client.ListOptions{Namespace: clusterNamespace}})
- if err != nil && !kerrors.IsNotFound(err) {
- logger.Errorf("failed to delete CSI-operator Ceph Connection %q. %v", csiCephConnection.Name, err)
- } else {
- logger.Infof("deleted CSI-operator Ceph Connection %q", csiCephConnection.Name)
- }
-
- csiOpClientProfile := &csiopv1a1.ClientProfile{}
- err = r.client.DeleteAllOf(r.opManagerContext, csiOpClientProfile, &client.DeleteAllOfOptions{ListOptions: client.ListOptions{Namespace: clusterNamespace}})
- if err != nil && !kerrors.IsNotFound(err) {
- logger.Errorf("failed to delete CSI-operator client profile %q. %v", csiOpClientProfile.Name, err)
- } else {
- logger.Infof("deleted CSI-operator client profile %q", csiOpClientProfile.Name)
- }
-
- err = r.deleteImageSetConfigMap()
- if err != nil && !kerrors.IsNotFound(err) {
- logger.Error("failed to delete imageSetConfigMap", err)
- }
-
- if deleteOp {
- csiDriver := &csiopv1a1.Driver{}
- err = r.client.DeleteAllOf(r.opManagerContext, csiDriver, &client.DeleteAllOfOptions{ListOptions: client.ListOptions{Namespace: r.opConfig.OperatorNamespace}})
- if err != nil && !kerrors.IsNotFound(err) {
- logger.Errorf("failed to delete CSI-operator driver config %q. %v", csiDriver.Name, err)
- } else {
- logger.Infof("deleted CSI-operator driver config %q", csiDriver.Name)
- }
-
- opConfig := &csiopv1a1.OperatorConfig{}
- err = r.client.DeleteAllOf(r.opManagerContext, opConfig, &client.DeleteAllOfOptions{ListOptions: client.ListOptions{Namespace: r.opConfig.OperatorNamespace}})
- if err != nil && !kerrors.IsNotFound(err) {
- logger.Errorf("failed to delete CSI-operator operator config %q. %v", opConfig.Name, err)
- } else {
- logger.Infof("deleted CSI-operator operator config %q", opConfig.Name)
- }
- }
-}
-
func (r *ReconcileCSI) deleteCSIDriverResources(ver *version.Info, daemonset, deployment, service, driverName string) error {
csiDriverobj := v1CsiDriver{}
err := k8sutil.DeleteDaemonset(r.opManagerContext, r.context.Clientset, r.opConfig.OperatorNamespace, daemonset)
diff --git a/pkg/operator/ceph/file/filesystem.go b/pkg/operator/ceph/file/filesystem.go
index 898875c6b7e6..8a35ab1a0231 100644
--- a/pkg/operator/ceph/file/filesystem.go
+++ b/pkg/operator/ceph/file/filesystem.go
@@ -144,6 +144,14 @@ func validateFilesystem(context *clusterd.Context, clusterInfo *cephclient.Clust
if len(f.Spec.DataPools) == 0 {
return nil
}
+
+ // Ensure duplicate pool names are not present in the spec.
+ if len(f.Spec.DataPools) > 1 {
+ if hasDuplicatePoolNames(f.Spec.DataPools) {
+ return errors.New("duplicate pool names in the data pool spec")
+ }
+ }
+
if err := cephpool.ValidatePoolSpec(context, clusterInfo, clusterSpec, &f.Spec.MetadataPool); err != nil {
return errors.Wrap(err, "invalid metadata pool")
}
@@ -157,6 +165,21 @@ func validateFilesystem(context *clusterd.Context, clusterInfo *cephclient.Clust
return nil
}
+func hasDuplicatePoolNames(poolSpecList []cephv1.NamedPoolSpec) bool {
+ poolNames := make(map[string]struct{})
+ for _, poolSpec := range poolSpecList {
+ if poolSpec.Name != "" {
+ if _, has := poolNames[poolSpec.Name]; has {
+ logger.Errorf("duplicate pool name %q in the data pool spec", poolSpec.Name)
+ return true
+ }
+ poolNames[poolSpec.Name] = struct{}{}
+ }
+ }
+
+ return false
+}
+
// newFS creates a new instance of the file (MDS) service
func newFS(name, namespace string) *Filesystem {
return &Filesystem{
diff --git a/pkg/operator/ceph/file/filesystem_test.go b/pkg/operator/ceph/file/filesystem_test.go
index ffe8aaf5dccd..856add3885eb 100644
--- a/pkg/operator/ceph/file/filesystem_test.go
+++ b/pkg/operator/ceph/file/filesystem_test.go
@@ -73,6 +73,26 @@ func TestValidateSpec(t *testing.T) {
assert.Nil(t, validateFilesystem(context, clusterInfo, clusterSpec, fs))
}
+func TestHasDuplicatePoolNames(t *testing.T) {
+ // PoolSpec with no duplicates
+ fs := &cephv1.CephFilesystem{
+ Spec: cephv1.FilesystemSpec{
+ DataPools: []cephv1.NamedPoolSpec{
+ {Name: "pool1"},
+ {Name: "pool2"},
+ },
+ },
+ }
+
+ result := hasDuplicatePoolNames(fs.Spec.DataPools)
+ assert.False(t, result)
+
+ // add duplicate pool name in the spec.
+ fs.Spec.DataPools = append(fs.Spec.DataPools, cephv1.NamedPoolSpec{Name: "pool1"})
+ result = hasDuplicatePoolNames(fs.Spec.DataPools)
+ assert.True(t, result)
+}
+
func TestGenerateDataPoolNames(t *testing.T) {
fs := &Filesystem{Name: "fake", Namespace: "fake"}
fsSpec := cephv1.FilesystemSpec{
diff --git a/pkg/operator/ceph/object/admin.go b/pkg/operator/ceph/object/admin.go
index 5e5a2596f515..67ab074a3b84 100644
--- a/pkg/operator/ceph/object/admin.go
+++ b/pkg/operator/ceph/object/admin.go
@@ -48,6 +48,14 @@ type Context struct {
Zone string
}
+func (c *Context) nsName() string {
+ if c.clusterInfo == nil {
+ logger.Infof("unable to get namespaced name for rgw %s", c.Name)
+ return c.Name
+ }
+ return fmt.Sprintf("%s/%s", c.clusterInfo.Namespace, c.Name)
+}
+
// AdminOpsContext holds the object store context as well as information for connecting to the admin
// ops API.
type AdminOpsContext struct {
@@ -101,9 +109,7 @@ const (
rgwAdminOpsUserCaps = "buckets=*;users=*;usage=read;metadata=read;zone=read"
)
-var (
- rgwAdminOpsUserDisplayName = "RGW Admin Ops User"
-)
+var rgwAdminOpsUserDisplayName = "RGW Admin Ops User"
// NewContext creates a new object store context.
func NewContext(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, name string) *Context {
diff --git a/pkg/operator/ceph/object/controller.go b/pkg/operator/ceph/object/controller.go
index 651978ec9204..82e8b128c820 100644
--- a/pkg/operator/ceph/object/controller.go
+++ b/pkg/operator/ceph/object/controller.go
@@ -441,12 +441,19 @@ func (r *ReconcileCephObjectStore) reconcileCreateObjectStore(cephObjectStore *c
return r.setFailedStatus(k8sutil.ObservedGenerationNotAvailable, namespacedName, "failed to set endpoint", err)
}
+ err = ValidateObjectStorePoolsConfig(cephObjectStore.Spec.MetadataPool, cephObjectStore.Spec.DataPool, cephObjectStore.Spec.SharedPools)
+ if err != nil {
+ return r.setFailedStatus(k8sutil.ObservedGenerationNotAvailable, namespacedName, "invalid pool configuration", err)
+ }
// Reconcile Pool Creation
if !cephObjectStore.Spec.IsMultisite() {
logger.Info("reconciling object store pools")
- err = ConfigurePools(objContext, r.clusterSpec, cephObjectStore.Spec.MetadataPool, cephObjectStore.Spec.DataPool, cephObjectStore.Spec.SharedPools)
- if err != nil {
- return r.setFailedStatus(k8sutil.ObservedGenerationNotAvailable, namespacedName, "failed to create object pools", err)
+
+ if IsNeedToCreateObjectStorePools(cephObjectStore.Spec.SharedPools) {
+ err = CreateObjectStorePools(objContext, r.clusterSpec, cephObjectStore.Spec.MetadataPool, cephObjectStore.Spec.DataPool)
+ if err != nil {
+ return r.setFailedStatus(k8sutil.ObservedGenerationNotAvailable, namespacedName, "failed to create object pools", err)
+ }
}
}
diff --git a/pkg/operator/ceph/object/json_helpers.go b/pkg/operator/ceph/object/json_helpers.go
new file mode 100644
index 000000000000..719339e7db2a
--- /dev/null
+++ b/pkg/operator/ceph/object/json_helpers.go
@@ -0,0 +1,125 @@
+package object
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// getObjProperty - helper function to manipulate JSON Objects.
+// returns nested property of json object.
+// Example:
+//
+// obj = {"a":{"b":"foo"}}
+// // will return "foo"
+// getObjProperty(obj,"a","b")
+func getObjProperty[T string | map[string]interface{} | []interface{}](obj map[string]interface{}, path ...string) (T, error) {
+ var res T
+ if len(path) == 0 {
+ return res, fmt.Errorf("json property path is empty")
+ }
+
+ for i, p := range path {
+ val, ok := obj[p]
+ if !ok {
+ return res, fmt.Errorf("json property %q not found", strings.Join(path[:i+1], "."))
+ }
+ last := i == len(path)-1
+ if last {
+ // last path segment: get result
+ res, ok = val.(T)
+ if !ok {
+ return res, fmt.Errorf("json property %q is not a %T, got %+v", strings.Join(path, "."), res, val)
+ }
+ return res, nil
+ }
+ // walk to the next obj in the path
+ obj, ok = val.(map[string]interface{})
+ if !ok {
+ return res, fmt.Errorf("json property %q is not an object, got %+v", strings.Join(path[:i+1], "."), val)
+ }
+ }
+ // not reachable
+ return res, fmt.Errorf("json property %q not found", strings.Join(path, "."))
+}
+
+// setObjProperty - helper function to manipulate JSON Objects.
+// sets value to json object nested field and returns previous value if presented.
+// Example:
+//
+// obj = {"a":{"b":"foo"}}
+// // will replace "foo" with "bar" and return "foo"
+// setObjProperty(obj,"bar","a","b")
+func setObjProperty[T string | []string | map[string]interface{} | []interface{}](obj map[string]interface{}, val T, path ...string) (T, error) {
+ var prev T
+ if len(path) == 0 {
+ return prev, fmt.Errorf("json property path is empty")
+ }
+ for i, p := range path {
+ last := i == len(path)-1
+ if last {
+ // last path segment: set result and return prev value
+ prevVal, ok := obj[p]
+ if ok {
+ prevRes, ok := prevVal.(T)
+ if ok {
+ prev = prevRes
+ } else {
+ // in go json all arrays are []interface{}, extra conversion for typed arrays (e.g. []string) needed:
+ p := new(T)
+ if castJson(prevVal, p) {
+ prev = *p
+ }
+ }
+ }
+ obj[p] = val
+ return prev, nil
+ }
+ // walk to the next obj in the path
+ next, ok := obj[p]
+ if !ok {
+ return prev, fmt.Errorf("json property %q is not found", strings.Join(path[:i+1], "."))
+ }
+ obj, ok = next.(map[string]interface{})
+ if !ok {
+ return prev, fmt.Errorf("json property %q is not an object, got %+v", strings.Join(path[:i+1], "."), next)
+ }
+ }
+ // not reachable
+ return prev, fmt.Errorf("json property %q not found", strings.Join(path, "."))
+}
+
+// castJson - helper function to manipulate JSON Objects.
+// Tries to cast any type to any type by converting to JSON and back.
+// Returns true on success.
+func castJson(in, out interface{}) bool {
+ bytes, err := json.Marshal(in)
+ if err != nil {
+ return false
+ }
+ err = json.Unmarshal(bytes, out)
+ return err == nil
+}
+
+// toObj - helper function to manipulate JSON Objects.
+// Casts any go struct to map representing JSON object.
+func toObj(val interface{}) (map[string]interface{}, error) {
+ bytes, err := json.Marshal(val)
+ if err != nil {
+ return nil, err
+ }
+ obj := map[string]interface{}{}
+ return obj, json.Unmarshal(bytes, &obj)
+}
+
+// deepCopyJson - helper function to manipulate JSON Objects.
+// Makes deep copy of json object by converting to JSON and back.
+func deepCopyJson(in map[string]interface{}) (map[string]interface{}, error) {
+ bytes, err := json.Marshal(in)
+ if err != nil {
+ return nil, err
+ }
+ res := map[string]interface{}{}
+ err = json.Unmarshal(bytes, &res)
+ return res, err
+}
diff --git a/pkg/operator/ceph/object/json_helpers_test.go b/pkg/operator/ceph/object/json_helpers_test.go
new file mode 100644
index 000000000000..9448c107067c
--- /dev/null
+++ b/pkg/operator/ceph/object/json_helpers_test.go
@@ -0,0 +1,509 @@
+package object
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_getObjPropertyStr(t *testing.T) {
+ type args struct {
+ json string
+ path []string
+ }
+ tests := []struct {
+ name string
+ args args
+ want string
+ wantErr bool
+ }{
+ {
+ name: "success",
+ args: args{
+ json: `{"a":{"b":"val"}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: "val",
+ wantErr: false,
+ },
+ {
+ name: "success: empty str",
+ args: args{
+ json: `{"a":{"b":""}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: "",
+ wantErr: false,
+ },
+ {
+ name: "err: empty json",
+ args: args{
+ json: `{}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: "",
+ wantErr: true,
+ },
+ {
+ name: "err: is obj",
+ args: args{
+ json: `{"a":{"b":{"val":"val"}}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: "",
+ wantErr: true,
+ },
+ {
+ name: "err: is arr",
+ args: args{
+ json: `{"a":{"b":["val1","val2"]}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: "",
+ wantErr: true,
+ },
+ {
+ name: "err: is bool",
+ args: args{
+ json: `{"a":{"b":true}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: "",
+ wantErr: true,
+ },
+ {
+ name: "err: is num",
+ args: args{
+ json: `{"a":{"b":5}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: "",
+ wantErr: true,
+ },
+ {
+ name: "err: is missing",
+ args: args{
+ json: `{"a":{"c":"val"}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: "",
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ obj := map[string]interface{}{}
+ _ = json.Unmarshal([]byte(tt.args.json), &obj)
+ got, err := getObjProperty[string](obj, tt.args.path...)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("getObjProperty() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("getObjProperty() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_getObjPropertyObjArr(t *testing.T) {
+ type args struct {
+ json string
+ path []string
+ }
+ tests := []struct {
+ name string
+ args args
+ want []interface{}
+ wantErr bool
+ }{
+ {
+ name: "success",
+ args: args{
+ json: `{"a":{"b":[
+ {"c":"val1"},
+ {"d":"val2"}
+ ]}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: []interface{}{
+ map[string]interface{}{"c": "val1"},
+ map[string]interface{}{"d": "val2"},
+ },
+ wantErr: false,
+ },
+ {
+ name: "err: empty json",
+ args: args{
+ json: `{}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "err: is obj",
+ args: args{
+ json: `{"a":{"b":{"val":"val"}}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "err: is bool",
+ args: args{
+ json: `{"a":{"b":true}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "err: is num",
+ args: args{
+ json: `{"a":{"b":5}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "err: is missing",
+ args: args{
+ json: `{"a":{"c":"val"}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ obj := map[string]interface{}{}
+ _ = json.Unmarshal([]byte(tt.args.json), &obj)
+ got, err := getObjProperty[[]interface{}](obj, tt.args.path...)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("getObjProperty() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("getObjProperty() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_setObjProperty(t *testing.T) {
+ type args struct {
+ json string
+ val string
+ path []string
+ }
+ tests := []struct {
+ name string
+ args args
+ wantPrev string
+ wantJSON string
+ wantErr bool
+ }{
+ {
+ name: "replace val",
+ args: args{
+ json: `{"a":{"b":"val"}}`,
+ val: "new val",
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantPrev: "val",
+ wantJSON: `{"a":{"b":"new val"}}`,
+ wantErr: false,
+ },
+ {
+ name: "same val",
+ args: args{
+ json: `{"a":{"b":"val"}}`,
+ val: "val",
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantPrev: "val",
+ wantJSON: `{"a":{"b":"val"}}`,
+ wantErr: false,
+ },
+ {
+ name: "add val",
+ args: args{
+ json: `{"a":{"b":"val"}}`,
+ val: "val2",
+ path: []string{
+ "a", "c",
+ },
+ },
+ wantPrev: "",
+ wantJSON: `{"a":{"b":"val","c":"val2"}}`,
+ wantErr: false,
+ },
+ {
+ name: "add root val",
+ args: args{
+ json: `{"a":{"b":"val"}}`,
+ val: "val2",
+ path: []string{
+ "c",
+ },
+ },
+ wantPrev: "",
+ wantJSON: `{"a":{"b":"val"},"c":"val2"}`,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ obj := map[string]interface{}{}
+ err := json.Unmarshal([]byte(tt.args.json), &obj)
+ assert.NoError(t, err)
+ prev, err := setObjProperty(obj, tt.args.val, tt.args.path...)
+ if tt.wantErr {
+ assert.Error(t, err)
+ return
+ } else {
+ assert.NoError(t, err)
+ }
+ assert.EqualValues(t, tt.wantPrev, prev)
+ bytes, err := json.Marshal(obj)
+ assert.NoError(t, err)
+ assert.JSONEq(t, tt.wantJSON, string(bytes))
+ })
+ }
+}
+func Test_setObjPropertyObj(t *testing.T) {
+ type args struct {
+ json string
+ val map[string]interface{}
+ path []string
+ }
+ tests := []struct {
+ name string
+ args args
+ wantPrev map[string]interface{}
+ wantJSON string
+ wantErr bool
+ }{
+ {
+ name: "add obj",
+ args: args{
+ json: `{"a":{"b":{}}}`,
+ val: map[string]interface{}{"c": "val1"},
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantPrev: map[string]interface{}{},
+ wantJSON: `{"a":{"b":{"c":"val1"}}}`,
+ wantErr: false,
+ },
+ {
+ name: "set obj",
+ args: args{
+ json: `{"a":{"b":{"c": "val1"}}}`,
+ val: map[string]interface{}{"d": "val2"},
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantPrev: map[string]interface{}{"c": "val1"},
+ wantJSON: `{"a":{"b":{"d":"val2"}}}`,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ obj := map[string]interface{}{}
+ err := json.Unmarshal([]byte(tt.args.json), &obj)
+ assert.NoError(t, err)
+ prev, err := setObjProperty(obj, tt.args.val, tt.args.path...)
+ if tt.wantErr {
+ assert.Error(t, err)
+ return
+ } else {
+ assert.NoError(t, err)
+ }
+ assert.EqualValues(t, tt.wantPrev, prev)
+ bytes, err := json.Marshal(obj)
+ assert.NoError(t, err)
+ assert.JSONEq(t, tt.wantJSON, string(bytes))
+ })
+ }
+}
+
+func Test_setObjPropertyArr(t *testing.T) {
+ type args struct {
+ json string
+ val []interface{}
+ path []string
+ }
+ tests := []struct {
+ name string
+ args args
+ wantPrev []interface{}
+ wantJSON string
+ wantErr bool
+ }{
+ {
+ name: "set obj arr",
+ args: args{
+ json: `{"a":{"b":{}}}`,
+ val: []interface{}{
+ map[string]interface{}{"c": "val1"},
+ map[string]interface{}{"d": "val2"},
+ },
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantPrev: nil,
+ wantJSON: `{"a":{"b":[{"c":"val1"},{"d":"val2"}]}}`,
+ wantErr: false,
+ },
+ {
+ name: "add obj arr",
+ args: args{
+ json: `{"a":{"b":[{"c": "val"}]}}`,
+ val: []interface{}{
+ map[string]interface{}{"d": "val1"},
+ map[string]interface{}{"e": "val2"},
+ },
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantPrev: []interface{}{
+ map[string]interface{}{"c": "val"},
+ },
+ wantJSON: `{"a":{"b":[{"d":"val1"},{"e":"val2"}]}}`,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ obj := map[string]interface{}{}
+ err := json.Unmarshal([]byte(tt.args.json), &obj)
+ assert.NoError(t, err)
+ prev, err := setObjProperty(obj, tt.args.val, tt.args.path...)
+ if tt.wantErr {
+ assert.Error(t, err)
+ return
+ } else {
+ assert.NoError(t, err)
+ }
+ assert.EqualValues(t, tt.wantPrev, prev)
+ bytes, err := json.Marshal(obj)
+ assert.NoError(t, err)
+ assert.JSONEq(t, tt.wantJSON, string(bytes))
+ })
+ }
+}
+func Test_setObjPropertyStrArr(t *testing.T) {
+ type args struct {
+ json string
+ val []string
+ path []string
+ }
+ tests := []struct {
+ name string
+ args args
+ wantPrev []string
+ wantJSON string
+ wantErr bool
+ }{
+ {
+ name: "add str arr",
+ args: args{
+ json: `{"a":{"b":{}}}`,
+ val: []string{"c", "d"},
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantPrev: nil,
+ wantJSON: `{"a":{"b":["c","d"]}}`,
+ wantErr: false,
+ },
+ {
+ name: "set str arr",
+ args: args{
+ json: `{"a":{"b":["val"]}}`,
+ val: []string{"c", "d"},
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantPrev: []string{"val"},
+ wantJSON: `{"a":{"b":["c","d"]}}`,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ obj := map[string]interface{}{}
+ err := json.Unmarshal([]byte(tt.args.json), &obj)
+ assert.NoError(t, err)
+ prev, err := setObjProperty(obj, tt.args.val, tt.args.path...)
+ if tt.wantErr {
+ assert.Error(t, err)
+ return
+ } else {
+ assert.NoError(t, err)
+ }
+ assert.EqualValues(t, tt.wantPrev, prev)
+ bytes, err := json.Marshal(obj)
+ assert.NoError(t, err)
+ assert.JSONEq(t, tt.wantJSON, string(bytes))
+ })
+ }
+}
+
+func Test_deepCopyJson(t *testing.T) {
+ in := map[string]interface{}{
+ "key": []interface{}{"1", "2", "3"},
+ }
+ inCopy, err := deepCopyJson(in)
+ assert.NoError(t, err)
+ assert.EqualValues(t, in, inCopy)
+
+ assert.EqualValues(t, []interface{}{"1", "2", "3"}, in["key"])
+ assert.EqualValues(t, []interface{}{"1", "2", "3"}, inCopy["key"])
+
+ inCopy["key"].([]interface{})[1] = "7"
+
+ assert.EqualValues(t, []interface{}{"1", "2", "3"}, in["key"])
+ assert.EqualValues(t, []interface{}{"1", "7", "3"}, inCopy["key"])
+}
diff --git a/pkg/operator/ceph/object/objectstore.go b/pkg/operator/ceph/object/objectstore.go
index bf194f4f40e4..794419ac8498 100644
--- a/pkg/operator/ceph/object/objectstore.go
+++ b/pkg/operator/ceph/object/objectstore.go
@@ -21,7 +21,7 @@ import (
"encoding/json"
"fmt"
"os"
- "path"
+ "reflect"
"sort"
"strconv"
"strings"
@@ -209,7 +209,6 @@ func deleteSingleSiteRealmAndPools(objContext *Context, spec cephv1.ObjectStoreS
// This is used for quickly getting the name of the realm, zone group, and zone for an object-store to pass into a Context
func getMultisiteForObjectStore(ctx context.Context, clusterdContext *clusterd.Context, spec *cephv1.ObjectStoreSpec, namespace, name string) (string, string, string, error) {
-
if spec.IsExternal() {
// In https://github.com/rook/rook/issues/6342, it was determined that
// a multisite context isn't needed for external mode CephObjectStores.
@@ -749,16 +748,8 @@ func missingPools(context *Context) ([]string, error) {
return missingPools, nil
}
-func ConfigurePools(context *Context, cluster *cephv1.ClusterSpec, metadataPool, dataPool cephv1.PoolSpec, sharedPools cephv1.ObjectSharedPoolsSpec) error {
- if sharedPoolsSpecified(sharedPools) {
- if !EmptyPool(dataPool) || !EmptyPool(metadataPool) {
- return fmt.Errorf("object store shared pools can only be specified if the metadata and data pools are not specified")
- }
- // Shared pools are configured elsewhere
- return nil
- }
-
- if EmptyPool(dataPool) && EmptyPool(metadataPool) {
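+// CreateObjectStorePools creates the object store metadata and data pools from the given
+// pool specs. When either spec is empty, the existence of the required pools is checked instead.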
+func CreateObjectStorePools(context *Context, cluster *cephv1.ClusterSpec, metadataPool, dataPool cephv1.PoolSpec) error {
+ if EmptyPool(dataPool) || EmptyPool(metadataPool) {
logger.Info("no pools specified for the CR, checking for their existence...")
missingPools, err := missingPools(context)
if err != nil {
@@ -793,77 +784,60 @@ func ConfigurePools(context *Context, cluster *cephv1.ClusterSpec, metadataPool,
return nil
}
-func sharedPoolsSpecified(sharedPools cephv1.ObjectSharedPoolsSpec) bool {
- return sharedPools.DataPoolName != "" && sharedPools.MetadataPoolName != ""
-}
-
func ConfigureSharedPoolsForZone(objContext *Context, sharedPools cephv1.ObjectSharedPoolsSpec) error {
- if !sharedPoolsSpecified(sharedPools) {
- logger.Debugf("no shared pools to configure for store %q", objContext.Name)
+ if sharedPools.DataPoolName == "" && sharedPools.MetadataPoolName == "" && len(sharedPools.PoolPlacements) == 0 {
+ logger.Debugf("no shared pools to configure for store %q", objContext.nsName())
return nil
}
+ logger.Infof("configuring shared pools for object store %q", objContext.nsName())
if err := sharedPoolsExist(objContext, sharedPools); err != nil {
return errors.Wrapf(err, "object store cannot be configured until shared pools exist")
}
- // retrieve the zone config
- logger.Infof("Retrieving zone %q", objContext.Zone)
- realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
- zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup)
- zoneArg := "--rgw-zone=" + objContext.Zone
- args := []string{"zone", "get", realmArg, zoneGroupArg, zoneArg}
-
- output, err := RunAdminCommandNoMultisite(objContext, true, args...)
+ zoneConfig, err := getZoneJSON(objContext)
if err != nil {
- return errors.Wrap(err, "failed to get zone")
+ return err
}
-
- logger.Debugf("Zone config is currently:\n%s", output)
-
- var zoneConfig map[string]interface{}
- err = json.Unmarshal([]byte(output), &zoneConfig)
+ zoneUpdated, err := adjustZoneDefaultPools(zoneConfig, sharedPools)
if err != nil {
- return errors.Wrap(err, "failed to unmarshal zone")
- }
-
- metadataPrefix := fmt.Sprintf("%s:%s.", sharedPools.MetadataPoolName, objContext.Name)
- dataPrefix := fmt.Sprintf("%s:%s.", sharedPools.DataPoolName, objContext.Name)
- expectedDataPool := dataPrefix + "buckets.data"
- if dataPoolIsExpected(objContext, zoneConfig, expectedDataPool) {
- logger.Debugf("Data pool already set as expected to %q", expectedDataPool)
- return nil
- }
-
- logger.Infof("Updating rados namespace configuration for zone %q", objContext.Zone)
- if err := applyExpectedRadosNamespaceSettings(zoneConfig, metadataPrefix, dataPrefix, expectedDataPool); err != nil {
- return errors.Wrap(err, "failed to configure rados namespaces")
+ return err
}
-
- configBytes, err := json.Marshal(zoneConfig)
+ zoneUpdated, err = adjustZonePlacementPools(zoneUpdated, sharedPools)
if err != nil {
- return errors.Wrap(err, "failed to serialize zone config")
+ return err
}
- logger.Debugf("Raw zone settings to apply: %s", string(configBytes))
+ hasZoneChanged := !reflect.DeepEqual(zoneConfig, zoneUpdated)
- configFilename := path.Join(objContext.Context.ConfigDir, objContext.Name+".zonecfg")
- if err := os.WriteFile(configFilename, configBytes, 0600); err != nil {
- return errors.Wrap(err, "failed to write zonfig config file")
+ zoneGroupConfig, err := getZoneGroupJSON(objContext)
+ if err != nil {
+ return err
}
- defer os.Remove(configFilename)
-
- args = []string{"zone", "set", zoneArg, "--infile=" + configFilename, realmArg, zoneGroupArg}
- output, err = RunAdminCommandNoMultisite(objContext, false, args...)
+ zoneGroupUpdated, err := adjustZoneGroupPlacementTargets(zoneGroupConfig, zoneUpdated)
if err != nil {
- return errors.Wrap(err, "failed to set zone config")
+ return err
}
- logger.Debugf("Zone set results=%s", output)
+ hasZoneGroupChanged := !reflect.DeepEqual(zoneGroupConfig, zoneGroupUpdated)
- if err = zoneUpdateWorkaround(objContext, output, expectedDataPool); err != nil {
- return errors.Wrap(err, "failed to apply zone set workaround")
+ // persist configuration updates:
+ if hasZoneChanged {
+ logger.Infof("zone config changed: performing zone config updates for %s", objContext.Zone)
+ updatedZoneResult, err := updateZoneJSON(objContext, zoneUpdated)
+ if err != nil {
+ return fmt.Errorf("unable to persist zone config update for %s: %w", objContext.Zone, err)
+ }
+ if err = zoneUpdateWorkaround(objContext, zoneUpdated, updatedZoneResult); err != nil {
+ return fmt.Errorf("failed to apply zone set workaround: %w", err)
+ }
+ }
+ if hasZoneGroupChanged {
+ logger.Infof("zonegroup config changed: performing zonegroup config updates for %s", objContext.ZoneGroup)
+ _, err = updateZoneGroupJSON(objContext, zoneGroupUpdated)
+ if err != nil {
+ return fmt.Errorf("unable to persist zonegroup config update for %s: %w", objContext.ZoneGroup, err)
+ }
}
- logger.Infof("Successfully configured RADOS namespaces for object store %q", objContext.Name)
return nil
}
@@ -872,139 +846,226 @@ func sharedPoolsExist(objContext *Context, sharedPools cephv1.ObjectSharedPoolsS
if err != nil {
return errors.Wrapf(err, "failed to list pools")
}
- foundMetadataPool := false
- foundDataPool := false
+ existing := make(map[string]struct{}, len(existingPools))
for _, pool := range existingPools {
- if pool.Name == sharedPools.MetadataPoolName {
- foundMetadataPool = true
- }
- if pool.Name == sharedPools.DataPoolName {
- foundDataPool = true
- }
+ existing[pool.Name] = struct{}{}
}
+ // sharedPools.MetadataPoolName, DataPoolName, and sharedPools.PoolPlacements.DataNonECPoolName are optional.
+ // ignore optional pools with an empty name:
+ existing[""] = struct{}{}
- if !foundMetadataPool && !foundDataPool {
- return fmt.Errorf("pools do not exist: %q and %q", sharedPools.MetadataPoolName, sharedPools.DataPoolName)
+ if _, ok := existing[sharedPools.MetadataPoolName]; !ok {
+ return fmt.Errorf("sharedPool do not exist: %s", sharedPools.MetadataPoolName)
}
- if !foundMetadataPool {
- return fmt.Errorf("metadata pool does not exist: %q", sharedPools.MetadataPoolName)
+ if _, ok := existing[sharedPools.DataPoolName]; !ok {
+ return fmt.Errorf("sharedPool do not exist: %s", sharedPools.DataPoolName)
}
- if !foundDataPool {
- return fmt.Errorf("data pool does not exist: %q", sharedPools.DataPoolName)
+
+ for _, pp := range sharedPools.PoolPlacements {
+ if _, ok := existing[pp.MetadataPoolName]; !ok {
+ return fmt.Errorf("sharedPool does not exist: pool %s for placement %s", pp.MetadataPoolName, pp.Name)
+ }
+ if _, ok := existing[pp.DataPoolName]; !ok {
+ return fmt.Errorf("sharedPool do not exist: pool %s for placement %s", pp.DataPoolName, pp.Name)
+ }
+ if _, ok := existing[pp.DataNonECPoolName]; !ok {
+ return fmt.Errorf("sharedPool do not exist: pool %s for placement %s", pp.DataNonECPoolName, pp.Name)
+ }
+ for _, sc := range pp.StorageClasses {
+ if _, ok := existing[sc.DataPoolName]; !ok {
+ return fmt.Errorf("sharedPool do not exist: pool %s for StorageClass %s", sc.DataPoolName, sc.Name)
+ }
+ }
}
- logger.Info("verified shared pools exist")
return nil
}
-func applyExpectedRadosNamespaceSettings(zoneConfig map[string]interface{}, metadataPrefix, dataPrefix, dataPool string) error {
- // Update the necessary fields for RAODS namespaces
- zoneConfig["domain_root"] = metadataPrefix + "meta.root"
- zoneConfig["control_pool"] = metadataPrefix + "control"
- zoneConfig["gc_pool"] = metadataPrefix + "log.gc"
- zoneConfig["lc_pool"] = metadataPrefix + "log.lc"
- zoneConfig["log_pool"] = metadataPrefix + "log"
- zoneConfig["intent_log_pool"] = metadataPrefix + "log.intent"
- zoneConfig["usage_log_pool"] = metadataPrefix + "log.usage"
- zoneConfig["roles_pool"] = metadataPrefix + "meta.roles"
- zoneConfig["reshard_pool"] = metadataPrefix + "log.reshard"
- zoneConfig["user_keys_pool"] = metadataPrefix + "meta.users.keys"
- zoneConfig["user_email_pool"] = metadataPrefix + "meta.users.email"
- zoneConfig["user_swift_pool"] = metadataPrefix + "meta.users.swift"
- zoneConfig["user_uid_pool"] = metadataPrefix + "meta.users.uid"
- zoneConfig["otp_pool"] = metadataPrefix + "otp"
- zoneConfig["notif_pool"] = metadataPrefix + "log.notif"
-
- placementPools, ok := zoneConfig["placement_pools"].([]interface{})
- if !ok {
- return fmt.Errorf("failed to parse placement_pools")
+func adjustZoneDefaultPools(zone map[string]interface{}, spec cephv1.ObjectSharedPoolsSpec) (map[string]interface{}, error) {
+ name, err := getObjProperty[string](zone, "name")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get zone name: %w", err)
}
- if len(placementPools) == 0 {
- return fmt.Errorf("no placement pools")
+
+ zone, err = deepCopyJson(zone)
+ if err != nil {
+ return nil, fmt.Errorf("unable to deep copy zone %s: %w", name, err)
+ }
+
+ defaultMetaPool := getDefaultMetadataPool(spec)
+ if defaultMetaPool == "" {
+ // the default pool is not present in the shared pool spec
+ return zone, nil
+ }
+ // add the zone namespace to the metadata pool so it can be safely shared across rgw instances or zones.
+ // in the non-multisite case the zone name equals the rgw instance name
+ defaultMetaPool = defaultMetaPool + ":" + name
+ zonePoolNSSuffix := map[string]string{
+ "domain_root": ".meta.root",
+ "control_pool": ".control",
+ "gc_pool": ".log.gc",
+ "lc_pool": ".log.lc",
+ "log_pool": ".log",
+ "intent_log_pool": ".log.intent",
+ "usage_log_pool": ".log.usage",
+ "roles_pool": ".meta.roles",
+ "reshard_pool": ".log.reshard",
+ "user_keys_pool": ".meta.users.keys",
+ "user_email_pool": ".meta.users.email",
+ "user_swift_pool": ".meta.users.swift",
+ "user_uid_pool": ".meta.users.uid",
+ "otp_pool": ".otp",
+ "notif_pool": ".log.notif",
+ }
+ for pool, nsSuffix := range zonePoolNSSuffix {
+ // replace rgw internal index pools with namespaced metadata pool
+ namespacedPool := defaultMetaPool + nsSuffix
+ prev, err := setObjProperty(zone, namespacedPool, pool)
+ if err != nil {
+ return nil, fmt.Errorf("unable to set pool %s for zone %s: %w", pool, name, err)
+ }
+ if namespacedPool != prev {
+ logger.Debugf("update shared pool %s for zone %s: %s -> %s", pool, name, prev, namespacedPool)
+ }
}
- placementPool, ok := placementPools[0].(map[string]interface{})
- if !ok {
- return fmt.Errorf("failed to parse placement_pools[0]")
+ return zone, nil
+}
+
+// There was a radosgw-admin bug that was preventing the RADOS namespace from being applied
+// for the data pool. The fix is included in Reef v18.2.3 or newer, and v19.2.0.
+// The workaround is to run a "radosgw-admin zone placement modify" command to apply
+// the desired data pool config.
+// After Reef (v18) support is removed, this method will be dead code.
+func zoneUpdateWorkaround(objContext *Context, expectedZone, gotZone map[string]interface{}) error {
+ // Compare the expected placement pools with what was actually applied for the RADOS namespaces.
+ // If the radosgw-admin fix is in the release, the data pool is already applied and we skip the workaround.
+ expected, err := getObjProperty[[]interface{}](expectedZone, "placement_pools")
+ if err != nil {
+ return err
}
- placementVals, ok := placementPool["val"].(map[string]interface{})
- if !ok {
- return fmt.Errorf("failed to parse placement_pools[0].val")
+ got, err := getObjProperty[[]interface{}](gotZone, "placement_pools")
+ if err != nil {
+ return err
}
- placementVals["index_pool"] = metadataPrefix + "buckets.index"
- // The extra pool is for omap data for multi-part uploads, so we use
- // the metadata pool instead of the data pool.
- placementVals["data_extra_pool"] = metadataPrefix + "buckets.non-ec"
- storageClasses, ok := placementVals["storage_classes"].(map[string]interface{})
- if !ok {
- return fmt.Errorf("failed to parse storage_classes")
+ if len(expected) != len(got) {
+ // should not happen
+ return fmt.Errorf("placements were not applied to zone config: expected %+v, got %+v", expected, got)
}
- stdStorageClass, ok := storageClasses["STANDARD"].(map[string]interface{})
- if !ok {
- return fmt.Errorf("failed to parse storage_classes.STANDARD")
+
+ // update pool placements one-by-one if needed
+ for i, expPl := range expected {
+ expPoolObj, ok := expPl.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unable to cast pool placement to object: %+v", expPl)
+ }
+ expPoolName, err := getObjProperty[string](expPoolObj, "key")
+ if err != nil {
+ return fmt.Errorf("unable to get pool placement name: %w", err)
+ }
+
+ gotPoolObj, ok := got[i].(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unable to cast pool placement to object: %+v", got[i])
+ }
+ gotPoolName, err := getObjProperty[string](gotPoolObj, "key")
+ if err != nil {
+ return fmt.Errorf("unable to get pool placement name: %w", err)
+ }
+
+ if expPoolName != gotPoolName {
+ // should not happen
+ return fmt.Errorf("placements were not applied to zone config: expected %+v, got %+v", expected, got)
+ }
+ err = zoneUpdatePlacementWorkaround(objContext, gotPoolName, expPoolObj, gotPoolObj)
+ if err != nil {
+ return fmt.Errorf("unable to do zone update workaround for placement %q: %w", gotPoolName, err)
+ }
}
- stdStorageClass["data_pool"] = dataPool
return nil
}
-func dataPoolIsExpected(objContext *Context, zoneConfig map[string]interface{}, expectedDataPool string) bool {
- placementPools, ok := zoneConfig["placement_pools"].([]interface{})
- if !ok {
- return false
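+// zoneUpdatePlacementWorkaround runs "zone placement modify" for a single placement when the
+// index, data extra, or storage class data pools applied by "zone set" (got) differ from the
+// expected zone config.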
+func zoneUpdatePlacementWorkaround(objContext *Context, placementID string, expect, got map[string]interface{}) error {
+ args := []string{
+ "zone", "placement", "modify",
+ "--rgw-realm=" + objContext.Realm,
+ "--rgw-zonegroup=" + objContext.ZoneGroup,
+ "--rgw-zone=" + objContext.Zone,
+ "--placement-id", placementID,
}
- placementPool, ok := placementPools[0].(map[string]interface{})
- if !ok {
- return false
+ // check index and data pools
+ needsWorkaround := false
+ expPool, err := getObjProperty[string](expect, "val", "index_pool")
+ if err != nil {
+ return err
}
- placementVals, ok := placementPool["val"].(map[string]interface{})
- if !ok {
- return false
+ gotPool, err := getObjProperty[string](got, "val", "index_pool")
+ if err != nil {
+ return err
}
- storageClasses, ok := placementVals["storage_classes"].(map[string]interface{})
- if !ok {
- return false
+ if expPool != gotPool {
+ logger.Infof("do zone update workaround for zone %s, placement %s index pool: %s -> %s", objContext.Zone, placementID, gotPool, expPool)
+ args = append(args, "--index-pool="+expPool)
+ needsWorkaround = true
}
- stdStorageClass, ok := storageClasses["STANDARD"].(map[string]interface{})
- if !ok {
- return false
+ expPool, err = getObjProperty[string](expect, "val", "data_extra_pool")
+ if err != nil {
+ return err
}
- logger.Infof("data pool is currently set to %q", stdStorageClass["data_pool"])
- return stdStorageClass["data_pool"] == expectedDataPool
-}
-
-// There was a radosgw-admin bug that was preventing the RADOS namespace from being applied
-// for the data pool. The fix is included in Reef v18.2.3 or newer, and v19.2.0.
-// The workaround is to run a "radosgw-admin zone placement modify" command to apply
-// the desired data pool config.
-// After Reef (v18) support is removed, this method will be dead code.
-func zoneUpdateWorkaround(objContext *Context, zoneOutput, expectedDataPool string) error {
- var zoneConfig map[string]interface{}
- err := json.Unmarshal([]byte(zoneOutput), &zoneConfig)
+ gotPool, err = getObjProperty[string](got, "val", "data_extra_pool")
if err != nil {
- return errors.Wrap(err, "failed to unmarshal zone")
+ return err
}
- // Update the necessary fields for RAODS namespaces
- // If the radosgw-admin fix is in the release, the data pool is already applied and we skip the workaround.
- if dataPoolIsExpected(objContext, zoneConfig, expectedDataPool) {
- logger.Infof("data pool was already set as expected to %q, workaround not needed", expectedDataPool)
- return nil
+ if expPool != gotPool {
+ logger.Infof("do zone update workaround for zone %s, placement %s data extra pool: %s -> %s", objContext.Zone, placementID, gotPool, expPool)
+ args = append(args, "--data-extra-pool="+expPool)
+ needsWorkaround = true
}
- logger.Infof("Setting data pool to %q", expectedDataPool)
- args := []string{"zone", "placement", "modify",
- "--rgw-realm=" + objContext.Realm,
- "--rgw-zonegroup=" + objContext.ZoneGroup,
- "--rgw-zone=" + objContext.Name,
- "--placement-id", "default-placement",
- "--storage-class", "STANDARD",
- "--data-pool=" + expectedDataPool,
+ if needsWorkaround {
+ _, err = RunAdminCommandNoMultisite(objContext, false, args...)
+ if err != nil {
+ return errors.Wrap(err, "failed to set zone config")
+ }
}
-
- output, err := RunAdminCommandNoMultisite(objContext, false, args...)
+ expSC, err := getObjProperty[map[string]interface{}](expect, "val", "storage_classes")
if err != nil {
- return errors.Wrap(err, "failed to set zone config")
+ return err
+ }
+ gotSC, err := getObjProperty[map[string]interface{}](got, "val", "storage_classes")
+ if err != nil {
+ return err
}
- logger.Debugf("zone placement modify output=%s", output)
- logger.Info("zone placement for the data pool was applied successfully")
+
+ // check storage classes data pools
+ for sc := range expSC {
+ expDP, err := getObjProperty[string](expSC, sc, "data_pool")
+ if err != nil {
+ return err
+ }
+ gotDP, err := getObjProperty[string](gotSC, sc, "data_pool")
+ if err != nil {
+ return err
+ }
+ if expDP == gotDP {
+ continue
+ }
+ logger.Infof("do zone update workaround for zone %s, placement %s storage-class %s pool: %s -> %s", objContext.Zone, placementID, sc, gotDP, expDP)
+ args = []string{
+ "zone", "placement", "modify",
+ "--rgw-realm=" + objContext.Realm,
+ "--rgw-zonegroup=" + objContext.ZoneGroup,
+ "--rgw-zone=" + objContext.Zone,
+ "--placement-id", placementID,
+ "--storage-class", sc,
+ "--data-pool=" + expDP,
+ }
+ output, err := RunAdminCommandNoMultisite(objContext, false, args...)
+ if err != nil {
+ return errors.Wrap(err, "failed to set zone config")
+ }
+ logger.Debugf("zone placement modify output=%s", output)
+ }
+
return nil
}
@@ -1333,3 +1394,17 @@ func CheckIfZonePresentInZoneGroup(objContext *Context) (bool, error) {
}
return false, nil
}
+
+// ValidateObjectStorePoolsConfig returns error if given ObjectStore pool configuration is inconsistent.
+func ValidateObjectStorePoolsConfig(metadataPool, dataPool cephv1.PoolSpec, sharedPools cephv1.ObjectSharedPoolsSpec) error {
+ if err := validatePoolPlacements(sharedPools.PoolPlacements); err != nil {
+ return err
+ }
+ if !EmptyPool(dataPool) && sharedPools.DataPoolName != "" {
+ return fmt.Errorf("invalidObjStorePoolCofig: object store dataPool and sharedPools.dataPool=%s are mutually exclusive. Only one of them can be set.", sharedPools.DataPoolName)
+ }
+ if !EmptyPool(metadataPool) && sharedPools.MetadataPoolName != "" {
+ return fmt.Errorf("invalidObjStorePoolCofig: object store metadataPool and sharedPools.metadataPool=%s are mutually exclusive. Only one of them can be set.", sharedPools.MetadataPoolName)
+ }
+ return nil
+}
diff --git a/pkg/operator/ceph/object/objectstore_test.go b/pkg/operator/ceph/object/objectstore_test.go
index 33d33059629c..7c0d16c851c4 100644
--- a/pkg/operator/ceph/object/objectstore_test.go
+++ b/pkg/operator/ceph/object/objectstore_test.go
@@ -1,5 +1,4 @@
-/*
-Copyright 2016 The Rook Authors. All rights reserved.
+/* Copyright 2016 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -18,8 +17,8 @@ package object
import (
"context"
- "encoding/json"
"fmt"
+ "os"
"strings"
"syscall"
"testing"
@@ -112,6 +111,85 @@ const (
"realm_id": "e7f176c6-d207-459c-aa04-c3334300ddc6",
"notif_pool": "rgw-meta-pool:store-a.log.notif"
}`
+ objectZoneSharedPoolsJsonTempl = `{
+ "id": "c1a20ed9-6370-4abd-b78c-bdf0da2a8dbb",
+ "name": "store-a",
+ "domain_root": "%[1]s:store-a.meta.root",
+ "control_pool": "%[1]s:store-a.control",
+ "gc_pool": "%[1]s:store-a.log.gc",
+ "lc_pool": "%[1]s:store-a.log.lc",
+ "log_pool": "%[1]s:store-a.log",
+ "intent_log_pool": "%[1]s:store-a.log.intent",
+ "usage_log_pool": "%[1]s:store-a.log.usage",
+ "roles_pool": "%[1]s:store-a.meta.roles",
+ "reshard_pool": "%[1]s:store-a.log.reshard",
+ "user_keys_pool": "%[1]s:store-a.meta.users.keys",
+ "user_email_pool": "%[1]s:store-a.meta.users.email",
+ "user_swift_pool": "%[1]s:store-a.meta.users.swift",
+ "user_uid_pool": "%[1]s:store-a.meta.users.uid",
+ "otp_pool": "%[1]s:store-a.otp",
+ "system_key": {
+ "access_key": "",
+ "secret_key": ""
+ },
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "data_extra_pool": "%[1]s:store-a.buckets.non-ec",
+ "index_pool": "%[1]s:store-a.buckets.index",
+ "index_type": 0,
+ "inline_data": true,
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "%[2]s:store-a.buckets.data"
+ }
+ }
+ }
+ }
+ ],
+ "realm_id": "e7f176c6-d207-459c-aa04-c3334300ddc6",
+ "notif_pool": "%[1]s:store-a.log.notif"
+}`
+
+ objectZonegroupJson = `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "store-a",
+ "api_name": "test",
+ "is_master": true,
+ "endpoints": [
+ "https://rook-ceph-rgw-test.rook-ceph.svc:443"
+ ],
+ "hostnames": [],
+ "hostnames_s3website": [],
+ "master_zone": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "zones": [
+ {
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "endpoints": [
+ "https://rook-ceph-rgw-test.rook-ceph.svc:443"
+ ]
+ }
+ ],
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd",
+ "sync_policy": {
+ "groups": []
+ },
+ "enabled_features": [
+ "resharding"
+ ]
+}`
//#nosec G101 -- The credentials are just for the unit tests
access_key = "VFKF8SSU9L3L2UR03Z8C"
@@ -149,183 +227,68 @@ func TestReconcileRealm(t *testing.T) {
assert.Nil(t, err)
}
-func TestApplyExpectedRadosNamespaceSettings(t *testing.T) {
- dataPoolName := "testdatapool"
- metaPrefix := "testmeta"
- dataPrefix := "testdata"
- var zoneConfig map[string]interface{}
-
- t.Run("fail when input empty", func(t *testing.T) {
- input := map[string]interface{}{}
- err := applyExpectedRadosNamespaceSettings(input, metaPrefix, dataPrefix, dataPoolName)
- assert.Error(t, err)
- assert.True(t, strings.Contains(err.Error(), "placement_pools"))
- })
- t.Run("valid input", func(t *testing.T) {
- assert.NoError(t, json.Unmarshal([]byte(objectZoneJson), &zoneConfig))
- assert.NoError(t, applyExpectedRadosNamespaceSettings(zoneConfig, metaPrefix, dataPrefix, dataPoolName))
- // validate a sampling of the updated fields
- assert.Equal(t, metaPrefix+"log.notif", zoneConfig["notif_pool"])
- placementPools := zoneConfig["placement_pools"].([]interface{})
- placementPool := placementPools[0].(map[string]interface{})
- placementVals := placementPool["val"].(map[string]interface{})
- storageClasses := placementVals["storage_classes"].(map[string]interface{})
- stdStorageClass := storageClasses["STANDARD"].(map[string]interface{})
- assert.Equal(t, dataPoolName, stdStorageClass["data_pool"])
- })
- t.Run("placement pools empty", func(t *testing.T) {
- // remove expected sections of the json and confirm that it returns an error without throwing an exception
- emptyPlacementPoolsJson := `{
- "otp_pool": "rgw-meta-pool:store-a.otp",
- "placement_pools": []
- }`
- assert.NoError(t, json.Unmarshal([]byte(emptyPlacementPoolsJson), &zoneConfig))
- err := applyExpectedRadosNamespaceSettings(zoneConfig, metaPrefix, dataPrefix, dataPoolName)
- assert.Error(t, err)
- assert.True(t, strings.Contains(err.Error(), "no placement pools"))
- })
- t.Run("placement pool value missing", func(t *testing.T) {
- missingPoolValueJson := `{
- "otp_pool": "rgw-meta-pool:store-a.otp",
- "placement_pools": [
- {
- "key": "default-placement"
- }
- ]
- }`
- assert.NoError(t, json.Unmarshal([]byte(missingPoolValueJson), &zoneConfig))
- err := applyExpectedRadosNamespaceSettings(zoneConfig, metaPrefix, dataPrefix, dataPoolName)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "placement_pools[0].val")
- })
- t.Run("storage classes missing", func(t *testing.T) {
- storageClassesMissing := `{
- "otp_pool": "rgw-meta-pool:store-a.otp",
- "placement_pools": [
- {
- "key": "default-placement",
- "val": {
- "index_pool": "rgw-meta-pool:store-a.buckets.index"
- }
- }
- ]
- }`
- assert.NoError(t, json.Unmarshal([]byte(storageClassesMissing), &zoneConfig))
- err := applyExpectedRadosNamespaceSettings(zoneConfig, metaPrefix, dataPrefix, dataPoolName)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "storage_classes")
- })
- t.Run("standard storage class missing", func(t *testing.T) {
- standardSCMissing := `{
- "otp_pool": "rgw-meta-pool:store-a.otp",
- "placement_pools": [
- {
- "key": "default-placement",
- "val": {
- "index_pool": "rgw-meta-pool:store-a.buckets.index",
- "storage_classes": {
- "BAD": {
- "data_pool": "rgw-data-pool:store-a.buckets.data"
- }
- }
- }
- }
- ]
- }`
- assert.NoError(t, json.Unmarshal([]byte(standardSCMissing), &zoneConfig))
- err := applyExpectedRadosNamespaceSettings(zoneConfig, metaPrefix, dataPrefix, dataPoolName)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "storage_classes.STANDARD")
- })
- t.Run("no config missing", func(t *testing.T) {
- nothingMissing := `{
- "otp_pool": "rgw-meta-pool:store-a.otp",
- "placement_pools": [
- {
- "key": "default-placement",
- "val": {
- "index_pool": "rgw-meta-pool:store-a.buckets.index",
- "storage_classes": {
- "STANDARD": {
- "data_pool": "rgw-data-pool:store-a.buckets.data"
- }
- }
- }
- }
- ]
- }`
- assert.NoError(t, json.Unmarshal([]byte(nothingMissing), &zoneConfig))
- err := applyExpectedRadosNamespaceSettings(zoneConfig, metaPrefix, dataPrefix, dataPoolName)
- assert.NoError(t, err)
- })
-}
-
-func TestSharedPoolsExist(t *testing.T) {
- executor := &exectest.MockExecutor{}
- poolJson := ""
- mockExecutorFuncOutput := func(command string, args ...string) (string, error) {
- logger.Infof("Command: %s %v", command, args)
- if args[0] == "osd" && args[1] == "lspools" {
- return poolJson, nil
- }
- return "", errors.Errorf("unexpected ceph command %q", args)
- }
- executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
- return mockExecutorFuncOutput(command, args...)
- }
- context := &Context{Context: &clusterd.Context{Executor: executor}, Name: "myobj", clusterInfo: client.AdminTestClusterInfo("mycluster")}
- sharedPools := cephv1.ObjectSharedPoolsSpec{
- MetadataPoolName: "metapool",
- DataPoolName: "datapool",
- }
- poolJson = `[{"poolnum":1,"poolname":".mgr"},{"poolnum":13,"poolname":".rgw.root"},
- {"poolnum":14,"poolname":"rgw-meta-pool"},{"poolnum":15,"poolname":"rgw-data-pool"}]`
- err := sharedPoolsExist(context, sharedPools)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "pools do not exist")
-
- sharedPools.MetadataPoolName = "rgw-meta-pool"
- err = sharedPoolsExist(context, sharedPools)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "data pool does not exist")
-
- sharedPools.DataPoolName = "rgw-data-pool"
- sharedPools.MetadataPoolName = "bad-pool"
- err = sharedPoolsExist(context, sharedPools)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "metadata pool does not exist")
-
- sharedPools.MetadataPoolName = "rgw-meta-pool"
- err = sharedPoolsExist(context, sharedPools)
- assert.NoError(t, err)
-}
-
func TestConfigureStoreWithSharedPools(t *testing.T) {
- dataPoolAlreadySet := "datapool:store-a.buckets.data"
+ sharedMetaPoolAlreadySet, sharedDataPoolAlreadySet := "", ""
zoneGetCalled := false
zoneSetCalled := false
+ zoneGroupGetCalled := false
+ zoneGroupSetCalled := false
placementModifyCalled := false
mockExecutorFuncOutput := func(command string, args ...string) (string, error) {
logger.Infof("Command: %s %v", command, args)
if args[0] == "osd" && args[1] == "lspools" {
- return `[{"poolnum":14,"poolname":"test-meta"},{"poolnum":15,"poolname":"test-data"}]`, nil
+ return `[{"poolnum":14,"poolname":"test-meta"},{"poolnum":15,"poolname":"test-data"},{"poolnum":16,"poolname":"fast-meta"},{"poolnum":17,"poolname":"fast-data"}]`, nil
}
return "", errors.Errorf("unexpected ceph command %q", args)
}
+
executorFuncTimeout := func(timeout time.Duration, command string, args ...string) (string, error) {
logger.Infof("CommandTimeout: %s %v", command, args)
if args[0] == "zone" {
if args[1] == "get" {
zoneGetCalled = true
- replaceDataPool := "rgw-data-pool:store-a.buckets.data"
- return strings.Replace(objectZoneJson, replaceDataPool, dataPoolAlreadySet, -1), nil
+ if sharedDataPoolAlreadySet == "" && sharedMetaPoolAlreadySet == "" {
+ replaceDataPool := "rgw-data-pool:store-a.buckets.data"
+ return strings.Replace(objectZoneJson, replaceDataPool, "datapool:store-a.buckets.data", -1), nil
+ }
+ return fmt.Sprintf(objectZoneSharedPoolsJsonTempl, sharedMetaPoolAlreadySet, sharedDataPoolAlreadySet), nil
} else if args[1] == "set" {
zoneSetCalled = true
+ for _, arg := range args {
+ if !strings.HasPrefix(arg, "--infile=") {
+ continue
+ }
+ file := strings.TrimPrefix(arg, "--infile=")
+ inBytes, err := os.ReadFile(file)
+ if err != nil {
+ panic(err)
+ }
+ return string(inBytes), nil
+ }
return objectZoneJson, nil
} else if args[1] == "placement" && args[2] == "modify" {
placementModifyCalled = true
return objectZoneJson, nil
}
+ } else if args[0] == "zonegroup" {
+ if args[1] == "get" {
+ zoneGroupGetCalled = true
+ return objectZonegroupJson, nil
+ } else if args[1] == "set" {
+ zoneGroupSetCalled = true
+ for _, arg := range args {
+ if !strings.HasPrefix(arg, "--infile=") {
+ continue
+ }
+ file := strings.TrimPrefix(arg, "--infile=")
+ inBytes, err := os.ReadFile(file)
+ if err != nil {
+ panic(err)
+ }
+ return string(inBytes), nil
+ }
+ return objectZonegroupJson, nil
+ }
}
return "", errors.Errorf("unexpected ceph command %q", args)
}
@@ -351,6 +314,8 @@ func TestConfigureStoreWithSharedPools(t *testing.T) {
assert.False(t, zoneGetCalled)
assert.False(t, zoneSetCalled)
assert.False(t, placementModifyCalled)
+ assert.False(t, zoneGroupGetCalled)
+ assert.False(t, zoneGroupSetCalled)
})
t.Run("configure the zone", func(t *testing.T) {
sharedPools := cephv1.ObjectSharedPoolsSpec{
@@ -361,7 +326,27 @@ func TestConfigureStoreWithSharedPools(t *testing.T) {
assert.NoError(t, err)
assert.True(t, zoneGetCalled)
assert.True(t, zoneSetCalled)
- assert.True(t, placementModifyCalled)
+ assert.False(t, placementModifyCalled) // mock returns applied namespaces, no workaround needed
+ assert.True(t, zoneGroupGetCalled)
+ assert.False(t, zoneGroupSetCalled) // zone group is set only if extra pool placements specified
+ })
+ t.Run("configure with default placement", func(t *testing.T) {
+ sharedPools := cephv1.ObjectSharedPoolsSpec{
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: defaultPlacementName,
+ MetadataPoolName: "test-meta",
+ DataPoolName: "test-data",
+ },
+ },
+ }
+ err := ConfigureSharedPoolsForZone(context, sharedPools)
+ assert.NoError(t, err)
+ assert.True(t, zoneGetCalled)
+ assert.True(t, zoneSetCalled)
+ assert.False(t, placementModifyCalled) // mock returns applied namespaces, no workaround needed
+ assert.True(t, zoneGroupGetCalled)
+ assert.False(t, zoneGroupSetCalled) // zone group is set only if extra pool placements specified
})
t.Run("data pool already set", func(t *testing.T) {
// Simulate that the data pool has already been set and the zone update can be skipped
@@ -369,15 +354,40 @@ func TestConfigureStoreWithSharedPools(t *testing.T) {
MetadataPoolName: "test-meta",
DataPoolName: "test-data",
}
- dataPoolAlreadySet = fmt.Sprintf("%s:%s.buckets.data", sharedPools.DataPoolName, context.Zone)
+ sharedMetaPoolAlreadySet, sharedDataPoolAlreadySet = "test-meta", "test-data"
zoneGetCalled = false
zoneSetCalled = false
placementModifyCalled = false
err := ConfigureSharedPoolsForZone(context, sharedPools)
assert.True(t, zoneGetCalled)
assert.False(t, zoneSetCalled)
- assert.False(t, placementModifyCalled)
+ assert.False(t, placementModifyCalled) // mock returns applied namespaces, no workaround needed
assert.NoError(t, err)
+ assert.True(t, zoneGroupGetCalled)
+ assert.False(t, zoneGroupSetCalled)
+ })
+ t.Run("configure with extra placement", func(t *testing.T) {
+ sharedPools := cephv1.ObjectSharedPoolsSpec{
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: defaultPlacementName,
+ MetadataPoolName: "test-meta",
+ DataPoolName: "test-data",
+ },
+ {
+ Name: "fast",
+ MetadataPoolName: "fast-meta",
+ DataPoolName: "fast-data",
+ },
+ },
+ }
+ err := ConfigureSharedPoolsForZone(context, sharedPools)
+ assert.NoError(t, err)
+ assert.True(t, zoneGetCalled)
+ assert.True(t, zoneSetCalled)
+ assert.False(t, placementModifyCalled) // mock returns applied namespaces, no workaround needed
+ assert.True(t, zoneGroupGetCalled)
+ assert.True(t, zoneGroupSetCalled)
})
}
@@ -1482,3 +1492,486 @@ func TestListsAreEqual(t *testing.T) {
})
}
}
+
+func TestValidateObjectStorePoolsConfig(t *testing.T) {
+ type args struct {
+ metadataPool cephv1.PoolSpec
+ dataPool cephv1.PoolSpec
+ sharedPools cephv1.ObjectSharedPoolsSpec
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "valid: nothing is set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{},
+ dataPool: cephv1.PoolSpec{},
+ sharedPools: cephv1.ObjectSharedPoolsSpec{},
+ },
+ wantErr: false,
+ },
+ {
+ name: "valid: only metadata pool set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{
+ FailureDomain: "host",
+ Replicated: cephv1.ReplicatedSpec{Size: 3},
+ },
+ dataPool: cephv1.PoolSpec{},
+ sharedPools: cephv1.ObjectSharedPoolsSpec{},
+ },
+ wantErr: false,
+ },
+ {
+ name: "valid: only data pool set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{},
+ dataPool: cephv1.PoolSpec{
+ FailureDomain: "host",
+ Replicated: cephv1.ReplicatedSpec{Size: 3},
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{},
+ },
+ wantErr: false,
+ },
+ {
+ name: "valid: only metadata and data pools set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{
+ FailureDomain: "host",
+ Replicated: cephv1.ReplicatedSpec{Size: 3},
+ },
+ dataPool: cephv1.PoolSpec{
+ FailureDomain: "host",
+ Replicated: cephv1.ReplicatedSpec{Size: 3},
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{},
+ },
+ wantErr: false,
+ },
+ {
+ name: "valid: only shared metadata pool set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{},
+ dataPool: cephv1.PoolSpec{},
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "test",
+ DataPoolName: "",
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "valid: only shared data pool set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{},
+ dataPool: cephv1.PoolSpec{},
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "test",
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "valid: only shared data and metaData pools set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{},
+ dataPool: cephv1.PoolSpec{},
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "test",
+ DataPoolName: "test",
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "valid: shared meta and non-shared data",
+ args: args{
+ metadataPool: cephv1.PoolSpec{},
+ dataPool: cephv1.PoolSpec{
+ FailureDomain: "host",
+ Replicated: cephv1.ReplicatedSpec{Size: 3},
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "test",
+ DataPoolName: "",
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "valid: shared data and non-shared meta",
+ args: args{
+ metadataPool: cephv1.PoolSpec{
+ FailureDomain: "host",
+ Replicated: cephv1.ReplicatedSpec{Size: 3},
+ },
+ dataPool: cephv1.PoolSpec{},
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "test",
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "invalid: shared and non-shared meta set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{
+ FailureDomain: "host",
+ Replicated: cephv1.ReplicatedSpec{Size: 3},
+ },
+ dataPool: cephv1.PoolSpec{},
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "test",
+ DataPoolName: "",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "invalid: shared and non-shared data set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{},
+ dataPool: cephv1.PoolSpec{
+ FailureDomain: "host",
+ Replicated: cephv1.ReplicatedSpec{Size: 3},
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "test",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "invalid: placements invalid",
+ args: args{
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "same_name",
+ MetadataPoolName: "",
+ DataPoolName: "",
+ DataNonECPoolName: "",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ {
+ Name: "same_name",
+ MetadataPoolName: "",
+ DataPoolName: "",
+ DataNonECPoolName: "",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := ValidateObjectStorePoolsConfig(tt.args.metadataPool, tt.args.dataPool, tt.args.sharedPools); (err != nil) != tt.wantErr {
+ t.Errorf("ValidateObjectStorePoolsConfig() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func Test_sharedPoolsExist(t *testing.T) {
+ type args struct {
+ existsInCluster []string
+ sharedPools cephv1.ObjectSharedPoolsSpec
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "all pool exists",
+ args: args{
+ existsInCluster: []string{
+ "meta",
+ "data",
+ "placement-meta",
+ "placement-data",
+ "placement-data-non-ec",
+ "placement-sc-data",
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "placement-meta",
+ DataPoolName: "placement-data",
+ DataNonECPoolName: "placement-data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "sc",
+ DataPoolName: "placement-sc-data",
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "meta pool not exists",
+ args: args{
+ existsInCluster: []string{
+ // "meta",
+ "data",
+ "placement-meta",
+ "placement-data",
+ "placement-data-non-ec",
+ "placement-sc-data",
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "placement-meta",
+ DataPoolName: "placement-data",
+ DataNonECPoolName: "placement-data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "sc",
+ DataPoolName: "placement-sc-data",
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "data pool not exists",
+ args: args{
+ existsInCluster: []string{
+ "meta",
+ // "data",
+ "placement-meta",
+ "placement-data",
+ "placement-data-non-ec",
+ "placement-sc-data",
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "placement-meta",
+ DataPoolName: "placement-data",
+ DataNonECPoolName: "placement-data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "sc",
+ DataPoolName: "placement-sc-data",
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "placement meta pool not exists",
+ args: args{
+ existsInCluster: []string{
+ "meta",
+ "data",
+ // "placement-meta",
+ "placement-data",
+ "placement-data-non-ec",
+ "placement-sc-data",
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "placement-meta",
+ DataPoolName: "placement-data",
+ DataNonECPoolName: "placement-data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "sc",
+ DataPoolName: "placement-sc-data",
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "placement data pool not exists",
+ args: args{
+ existsInCluster: []string{
+ "meta",
+ "data",
+ "placement-meta",
+ // "placement-data",
+ "placement-data-non-ec",
+ "placement-sc-data",
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "placement-meta",
+ DataPoolName: "placement-data",
+ DataNonECPoolName: "placement-data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "sc",
+ DataPoolName: "placement-sc-data",
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "placement data non ec pool not exists",
+ args: args{
+ existsInCluster: []string{
+ "meta",
+ "data",
+ "placement-meta",
+ "placement-data",
+ // "placement-data-non-ec",
+ "placement-sc-data",
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "placement-meta",
+ DataPoolName: "placement-data",
+ DataNonECPoolName: "placement-data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "sc",
+ DataPoolName: "placement-sc-data",
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "placement storage class pool not exists",
+ args: args{
+ existsInCluster: []string{
+ "meta",
+ "data",
+ "placement-meta",
+ "placement-data",
+ "placement-data-non-ec",
+ // "placement-sc-data",
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "placement-meta",
+ DataPoolName: "placement-data",
+ DataNonECPoolName: "placement-data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "sc",
+ DataPoolName: "placement-sc-data",
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "empty pool names ignored",
+ args: args{
+ existsInCluster: []string{},
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "",
+ DataPoolName: "",
+ DataNonECPoolName: "",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "sc",
+ DataPoolName: "",
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ executor := &exectest.MockExecutor{}
+ mockExecutorFuncOutput := func(command string, args ...string) (string, error) {
+ if args[0] == "osd" && args[1] == "lspools" {
+ pools := make([]string, len(tt.args.existsInCluster))
+ for i, p := range tt.args.existsInCluster {
+ pools[i] = fmt.Sprintf(`{"poolnum":%d,"poolname":%q}`, i+1, p)
+ }
+ poolJson := fmt.Sprintf(`[%s]`, strings.Join(pools, ","))
+ return poolJson, nil
+ }
+ return "", errors.Errorf("unexpected ceph command %q", args)
+ }
+ executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
+ return mockExecutorFuncOutput(command, args...)
+ }
+ context := &Context{Context: &clusterd.Context{Executor: executor}, Name: "myobj", clusterInfo: client.AdminTestClusterInfo("mycluster")}
+
+ if err := sharedPoolsExist(context, tt.args.sharedPools); (err != nil) != tt.wantErr {
+ t.Errorf("sharedPoolsExist() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
diff --git a/pkg/operator/ceph/object/shared_pools.go b/pkg/operator/ceph/object/shared_pools.go
new file mode 100644
index 000000000000..0ab62b8d71d1
--- /dev/null
+++ b/pkg/operator/ceph/object/shared_pools.go
@@ -0,0 +1,510 @@
+package object
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path"
+ "sort"
+
+ "github.com/pkg/errors"
+ cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+ kerrors "k8s.io/apimachinery/pkg/api/errors"
+)
+
+const (
+ defaultPlacementName = "default"
+ defaultPlacementCephConfigName = "default-placement"
+ defaultPlacementStorageClass = "STANDARD"
+)
+
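+// IsNeedToCreateObjectStorePools returns false when a "default" pool placement or shared
+// metadata and data pools are configured, since the object store will use those existing pools;
+// otherwise the object store pools need to be created.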
+func IsNeedToCreateObjectStorePools(sharedPools cephv1.ObjectSharedPoolsSpec) bool {
+ for _, pp := range sharedPools.PoolPlacements {
+ if pp.Name == defaultPlacementName {
+ // No need to create pools. External pools from default placement will be used
+ return false
+ }
+ }
+ if sharedPools.MetadataPoolName != "" && sharedPools.DataPoolName != "" {
+ // No need to create pools. Shared pools will be used
+ return false
+ }
+ return true
+}
+
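+// validatePoolPlacements checks that pool placement names are unique and that the storage
+// classes of each placement are valid.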
+func validatePoolPlacements(placements []cephv1.PoolPlacementSpec) error {
+ names := make(map[string]struct{}, len(placements))
+ for _, p := range placements {
+ if _, ok := names[p.Name]; ok {
+ return fmt.Errorf("invalidObjStorePoolCofig: invalid placement %s: placement names must be unique", p.Name)
+ }
+ names[p.Name] = struct{}{}
+ if err := validatePoolPlacementStorageClasses(p.StorageClasses); err != nil {
+ return fmt.Errorf("invalidObjStorePoolCofig: invalid placement %s: %w", p.Name, err)
+ }
+ }
+ return nil
+}
+
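+// validatePoolPlacementStorageClasses checks that storage class names are unique and do not
+// collide with the reserved STANDARD storage class.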
+func validatePoolPlacementStorageClasses(scList []cephv1.PlacementStorageClassSpec) error {
+ names := make(map[string]struct{}, len(scList))
+ for _, sc := range scList {
+ if sc.Name == defaultPlacementStorageClass {
+ return fmt.Errorf("invalid placement StorageClass %q: %q name is reserved", sc.Name, defaultPlacementStorageClass)
+ }
+ if _, ok := names[sc.Name]; ok {
+ return fmt.Errorf("invalid placement StorageClass %q: name must be unique", sc.Name)
+ }
+ names[sc.Name] = struct{}{}
+ }
+ return nil
+}
+
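+// adjustZonePlacementPools returns a copy of the zone config with placement_pools synchronized
+// to the shared pools spec: placements are updated or added from the spec, and placements absent
+// from the spec are removed, except for the default placement which is always kept.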
+func adjustZonePlacementPools(zone map[string]interface{}, spec cephv1.ObjectSharedPoolsSpec) (map[string]interface{}, error) {
+ name, err := getObjProperty[string](zone, "name")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get zone name: %w", err)
+ }
+
+ // deep copy the source zone config
+ zone, err = deepCopyJson(zone)
+ if err != nil {
+ return nil, fmt.Errorf("unable to deep copy config for zone %s: %w", name, err)
+ }
+
+ placements, err := getObjProperty[[]interface{}](zone, "placement_pools")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get pool placements for zone %s: %w", name, err)
+ }
+
+ fromSpec := toZonePlacementPools(spec, name)
+
+ inConfig := map[string]struct{}{}
+ idxToRemove := map[int]struct{}{}
+ for i, p := range placements {
+ pObj, ok := p.(map[string]interface{})
+ if !ok {
+ return nil, fmt.Errorf("unable to cast pool placement to object for zone %s: %+v", name, p)
+ }
+ placementID, err := getObjProperty[string](pObj, "key")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get pool placement name for zone %s: %w", name, err)
+ }
+ // check if placement should be removed
+ if _, inSpec := fromSpec[placementID]; !inSpec && placementID != defaultPlacementCephConfigName {
+ // remove placement if it is not in spec, but don't remove default placement
+ idxToRemove[i] = struct{}{}
+ continue
+ }
+ // update placement with values from spec:
+ if pSpec, inSpec := fromSpec[placementID]; inSpec {
+ _, err = setObjProperty(pObj, pSpec.Val.IndexPool, "val", "index_pool")
+ if err != nil {
+ return nil, fmt.Errorf("unable to set index pool to pool placement %q for zone %q: %w", placementID, name, err)
+ }
+ _, err = setObjProperty(pObj, pSpec.Val.DataExtraPool, "val", "data_extra_pool")
+ if err != nil {
+ return nil, fmt.Errorf("unable to set data extra pool to pool placement %q for zone %q: %w", placementID, name, err)
+ }
+ scObj, err := toObj(pSpec.Val.StorageClasses)
+ if err != nil {
+ return nil, fmt.Errorf("unable convert to pool placement %q storage class for zone %q: %w", placementID, name, err)
+ }
+
+ _, err = setObjProperty(pObj, scObj, "val", "storage_classes")
+ if err != nil {
+ return nil, fmt.Errorf("unable to set storage classes to pool placement %q for zone %q: %w", placementID, name, err)
+ }
+ inConfig[placementID] = struct{}{}
+ }
+ }
+ if len(idxToRemove) != 0 {
+ //delete placements from slice
+ updated := make([]interface{}, 0, len(placements)-len(idxToRemove))
+ for i := range placements {
+ if _, ok := idxToRemove[i]; ok {
+ //remove
+ continue
+ }
+ updated = append(updated, placements[i])
+ }
+ placements = updated
+ }
+
+ // add new placements from spec:
+ for placementID, p := range fromSpec {
+ if _, ok := inConfig[placementID]; ok {
+ //already in config
+ continue
+ }
+ pObj, err := toObj(p)
+ if err != nil {
+ return nil, fmt.Errorf("unable convert pool placement %q for zone %q: %w", placementID, name, err)
+ }
+ placements = append(placements, pObj)
+ }
+
+ _, err = setObjProperty(zone, placements, "placement_pools")
+ if err != nil {
+ return nil, fmt.Errorf("unable to set pool placements for zone %q: %w", name, err)
+ }
+ return zone, nil
+}
+
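+// getDefaultMetadataPool returns the metadata pool of the "default" pool placement if one is
+// defined, otherwise the shared metadata pool from the spec.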
+func getDefaultMetadataPool(spec cephv1.ObjectSharedPoolsSpec) string {
+ for _, p := range spec.PoolPlacements {
+ if p.Name == defaultPlacementName {
+ return p.MetadataPoolName
+ }
+ }
+ return spec.MetadataPoolName
+}
+
+// toZonePlacementPools converts pool placement CRD definition to zone config json format structures
+func toZonePlacementPools(spec cephv1.ObjectSharedPoolsSpec, ns string) map[string]ZonePlacementPool {
+ hasDefault := false
+ res := make(map[string]ZonePlacementPool, len(spec.PoolPlacements)+1)
+ for _, pp := range spec.PoolPlacements {
+ name := pp.Name
+ if pp.Name == defaultPlacementName {
+ hasDefault = true
+ name = defaultPlacementCephConfigName
+ }
+ res[name] = toZonePlacementPool(pp, ns)
+ }
+ if !hasDefault && spec.DataPoolName != "" && spec.MetadataPoolName != "" {
+ // set shared pools as default if no default placement was provided
+ res[defaultPlacementCephConfigName] = ZonePlacementPool{
+ Key: defaultPlacementCephConfigName,
+ Val: ZonePlacementPoolVal{
+ // The extra pool is for omap data for multi-part uploads, so we use
+ // the metadata pool instead of the data pool.
+ DataExtraPool: spec.MetadataPoolName + ":" + ns + ".buckets.non-ec",
+ IndexPool: spec.MetadataPoolName + ":" + ns + ".buckets.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: spec.DataPoolName + ":" + ns + ".buckets.data",
+ },
+ },
+ // Workaround: radosgw-admin set zone json command sets incorrect default value for placement inline_data field.
+ // So we should set default value (true) explicitly.
+ // See: https://tracker.ceph.com/issues/67933
+ InlineData: true,
+ },
+ }
+ }
+ return res
+}
+
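+// toZonePlacementPool converts a single pool placement from the CRD spec into its zone config
+// representation, namespacing the pools with the zone (and placement) name.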
+func toZonePlacementPool(spec cephv1.PoolPlacementSpec, ns string) ZonePlacementPool {
+ placementNS := ns
+ if spec.Name != defaultPlacementName {
+ placementNS += "." + spec.Name
+ }
+ // The extra pool is for omap data for multi-part uploads, so we use
+ // the metadata pool instead of the data pool.
+ nonECPool := spec.MetadataPoolName + ":" + placementNS + ".data.non-ec"
+ if spec.DataNonECPoolName != "" {
+ nonECPool = spec.DataNonECPoolName + ":" + placementNS + ".data.non-ec"
+ }
+
+ res := ZonePlacementPool{
+ Key: spec.Name,
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: nonECPool,
+ IndexPool: spec.MetadataPoolName + ":" + placementNS + ".index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: spec.DataPoolName + ":" + placementNS + ".data",
+ },
+ },
+ // Workaround: radosgw-admin set zone json command sets incorrect default value for placement inline_data field.
+ // So we should set default value (true) explicitly.
+ // See: https://tracker.ceph.com/issues/67933
+ InlineData: true,
+ },
+ }
+ if res.Key == defaultPlacementName {
+ res.Key = defaultPlacementCephConfigName
+ }
+ for _, v := range spec.StorageClasses {
+ res.Val.StorageClasses[v.Name] = ZonePlacementStorageClass{
+ DataPool: v.DataPoolName + ":" + ns + "." + v.Name,
+ }
+ }
+ return res
+}
+
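+// adjustZoneGroupPlacementTargets returns a copy of the zonegroup config with default_placement
+// set to "default-placement" and placement_targets rebuilt to match the zone's placement_pools.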
+func adjustZoneGroupPlacementTargets(group, zone map[string]interface{}) (map[string]interface{}, error) {
+ name, err := getObjProperty[string](group, "name")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get zonegroup name: %w", err)
+ }
+
+ // deep copy the source zonegroup config
+ group, err = deepCopyJson(group)
+ if err != nil {
+ return nil, fmt.Errorf("unable to deep copy config for zonegroup %s: %w", name, err)
+ }
+
+ _, err = setObjProperty(group, defaultPlacementCephConfigName, "default_placement")
+ if err != nil {
+ return nil, fmt.Errorf("unable to set default_placement for zonegroup %s: %w", name, err)
+ }
+
+ desiredTargets, err := createPlacementTargetsFromZonePoolPlacements(zone)
+ if err != nil {
+ return nil, fmt.Errorf("unable to create targets from placements for zonegroup %q: %w", name, err)
+ }
+ currentTargets, err := getObjProperty[[]interface{}](group, "placement_targets")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get targets from placements for zonegroup %q: %w", name, err)
+ }
+
+ applied := map[string]struct{}{}
+ idxToRemove := map[int]struct{}{}
+ for i, target := range currentTargets {
+ tObj, ok := target.(map[string]interface{})
+ if !ok {
+ return nil, fmt.Errorf("unable to cast placement target to object for zonegroup %q: %+v", name, target)
+ }
+ tName, err := getObjProperty[string](tObj, "name")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get placement target name for zonegroup %q: %w", name, err)
+ }
+ // update target:
+ if desired, ok := desiredTargets[tName]; ok {
+ sc := []interface{}{}
+ ok = castJson(desired.StorageClasses, &sc)
+ if ok {
+ _, err = setObjProperty(tObj, sc, "storage_classes")
+ } else {
+ _, err = setObjProperty(tObj, desired.StorageClasses, "storage_classes")
+ }
+ if err != nil {
+ return nil, fmt.Errorf("unable to set storage classes to pool placement target %q for zonegroup %q: %w", tName, name, err)
+ }
+ applied[tName] = struct{}{}
+ } else {
+ // remove target
+ idxToRemove[i] = struct{}{}
+ continue
+ }
+ }
+ if len(idxToRemove) != 0 {
+ // delete targets from slice
+ updated := make([]interface{}, 0, len(currentTargets)-len(idxToRemove))
+ for i := range currentTargets {
+ if _, ok := idxToRemove[i]; ok {
+ // remove
+ continue
+ }
+ updated = append(updated, currentTargets[i])
+ }
+ currentTargets = updated
+ }
+
+ // add new targets:
+ for targetName, target := range desiredTargets {
+ if _, ok := applied[targetName]; ok {
+ // already in config
+ continue
+ }
+ tObj, err := toObj(target)
+ if err != nil {
+ return nil, fmt.Errorf("unable convert placement target %q for zonegroup %q: %w", targetName, name, err)
+ }
+ currentTargets = append(currentTargets, tObj)
+ }
+
+ _, err = setObjProperty(group, currentTargets, "placement_targets")
+ if err != nil {
+ return nil, fmt.Errorf("unable to set placement targets for zonegroup %q: %w", name, err)
+ }
+
+ return group, nil
+}
+
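+// createPlacementTargetsFromZonePoolPlacements derives zonegroup placement targets from
+// the zone's placement_pools. For example (illustrative), a zone placement "fast" with
+// storage classes STANDARD and REDUCED_REDUNDANCY yields the target
+// {"name":"fast","storage_classes":["REDUCED_REDUNDANCY","STANDARD"]} (classes sorted).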
+func createPlacementTargetsFromZonePoolPlacements(zone map[string]interface{}) (map[string]ZonegroupPlacementTarget, error) {
+ zoneName, err := getObjProperty[string](zone, "name")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get zone name: %w", err)
+ }
+
+ zonePoolPlacements, err := getObjProperty[[]interface{}](zone, "placement_pools")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get pool placements for zone %q: %w", zoneName, err)
+ }
+
+ res := make(map[string]ZonegroupPlacementTarget, len(zonePoolPlacements))
+ for _, pp := range zonePoolPlacements {
+ ppObj, ok := pp.(map[string]interface{})
+ if !ok {
+ return nil, fmt.Errorf("unable to cast zone pool placement to json obj for zone %q: %+v", zoneName, pp)
+ }
+ name, err := getObjProperty[string](ppObj, "key")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get pool placement key for zone %q: %w", zoneName, err)
+ }
+ storClasses, err := getObjProperty[map[string]interface{}](ppObj, "val", "storage_classes")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get pool placement storage classes for zone %q: %w", zoneName, err)
+ }
+ target := ZonegroupPlacementTarget{
+ Name: name,
+ }
+ for sc := range storClasses {
+ target.StorageClasses = append(target.StorageClasses, sc)
+ }
+ sort.Strings(target.StorageClasses)
+ res[name] = target
+ }
+ return res, nil
+}
+
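+// getZoneJSON returns the zone configuration as a generic JSON object, roughly equivalent
+// to running: radosgw-admin zone get --rgw-realm=<realm> --rgw-zone=<zone>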
+func getZoneJSON(objContext *Context) (map[string]interface{}, error) {
+ if objContext.Realm == "" {
+ return nil, fmt.Errorf("get zone: object store realm is missing from context")
+ }
+ realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
+
+ if objContext.Zone == "" {
+ return nil, fmt.Errorf("get zone: object store zone is missing from context")
+ }
+ zoneArg := fmt.Sprintf("--rgw-zone=%s", objContext.Zone)
+
+ logger.Debugf("get zone: rgw-realm=%s, rgw-zone=%s", objContext.Realm, objContext.Zone)
+
+ jsonStr, err := RunAdminCommandNoMultisite(objContext, true, "zone", "get", realmArg, zoneArg)
+ if err != nil {
+ // This handles the case where the pod we use to exec the command (acting as a proxy) is not found/ready yet.
+ // The caller can then handle the error gracefully without flooding the operator logs with misleading error messages.
+ if kerrors.IsNotFound(err) {
+ return nil, err
+ }
+ return nil, errors.Wrap(err, "failed to get rgw zone group")
+ }
+ logger.Debugf("get zone success: rgw-realm=%s, rgw-zone=%s, res=%s", objContext.Realm, objContext.Zone, jsonStr)
+ res := map[string]interface{}{}
+ return res, json.Unmarshal([]byte(jsonStr), &res)
+}
+
+func getZoneGroupJSON(objContext *Context) (map[string]interface{}, error) {
+ if objContext.Realm == "" {
+ return nil, fmt.Errorf("get zonegroup: object store realm is missing from context")
+ }
+ realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
+
+ if objContext.Zone == "" {
+ return nil, fmt.Errorf("get zonegroup: object store zone is missing from context")
+ }
+ zoneArg := fmt.Sprintf("--rgw-zone=%s", objContext.Zone)
+
+ if objContext.ZoneGroup == "" {
+ return nil, fmt.Errorf("get zonegroup: object store zonegroup is missing from context")
+ }
+ zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup)
+
+ logger.Debugf("get zonegroup: rgw-realm=%s, rgw-zone=%s, rgw-zonegroup=%s", objContext.Realm, objContext.Zone, objContext.ZoneGroup)
+ jsonStr, err := RunAdminCommandNoMultisite(objContext, true, "zonegroup", "get", realmArg, zoneGroupArg, zoneArg)
+ if err != nil {
+ // This handles the case where the pod we use to exec the command (acting as a proxy) is not found/ready yet.
+ // The caller can then handle the error gracefully without flooding the operator logs with misleading error messages.
+ if kerrors.IsNotFound(err) {
+ return nil, err
+ }
+ return nil, errors.Wrap(err, "failed to get rgw zone group")
+ }
+ logger.Debugf("get zonegroup success: rgw-realm=%s, rgw-zone=%s, rgw-zonegroup=%s, res=%s", objContext.Realm, objContext.Zone, objContext.ZoneGroup, jsonStr)
+ res := map[string]interface{}{}
+ return res, json.Unmarshal([]byte(jsonStr), &res)
+}
+
+func updateZoneJSON(objContext *Context, zone map[string]interface{}) (map[string]interface{}, error) {
+ if objContext.Realm == "" {
+ return nil, fmt.Errorf("update zone: object store realm is missing from context")
+ }
+ realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
+
+ if objContext.Zone == "" {
+ return nil, fmt.Errorf("update zone: object store zone is missing from context")
+ }
+ zoneArg := fmt.Sprintf("--rgw-zone=%s", objContext.Zone)
+
+ configBytes, err := json.Marshal(zone)
+ if err != nil {
+ return nil, err
+ }
+ configFilename := path.Join(objContext.Context.ConfigDir, objContext.Name+".zonecfg")
+ if err := os.WriteFile(configFilename, configBytes, 0600); err != nil {
+ return nil, errors.Wrap(err, "failed to write zone config file")
+ }
+ defer os.Remove(configFilename)
+
+ args := []string{"zone", "set", zoneArg, "--infile=" + configFilename, realmArg}
+ updatedBytes, err := RunAdminCommandNoMultisite(objContext, false, args...)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to set zone config")
+ }
+ logger.Debugf("update zone: %s json config updated value from %q to %q", objContext.Zone, string(configBytes), string(updatedBytes))
+ updated := map[string]interface{}{}
+ err = json.Unmarshal([]byte(updatedBytes), &updated)
+ return updated, err
+}
+
+func updateZoneGroupJSON(objContext *Context, group map[string]interface{}) (map[string]interface{}, error) {
+ if objContext.Realm == "" {
+ return nil, fmt.Errorf("update zonegroup: object store realm is missing from context")
+ }
+ realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
+
+ if objContext.Zone == "" {
+ return nil, fmt.Errorf("update zonegroup: object store zone is missing from context")
+ }
+ zoneArg := fmt.Sprintf("--rgw-zone=%s", objContext.Zone)
+
+ if objContext.ZoneGroup == "" {
+ return nil, fmt.Errorf("update zonegroup: object store zonegroup is missing from context")
+ }
+ zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup)
+
+ configBytes, err := json.Marshal(group)
+ if err != nil {
+ return nil, err
+ }
+ configFilename := path.Join(objContext.Context.ConfigDir, objContext.Name+".zonegroupcfg")
+ if err := os.WriteFile(configFilename, configBytes, 0600); err != nil {
+ return nil, errors.Wrap(err, "failed to write zonegroup config file")
+ }
+ defer os.Remove(configFilename)
+
+ args := []string{"zonegroup", "set", zoneArg, "--infile=" + configFilename, realmArg, zoneGroupArg}
+ updatedBytes, err := RunAdminCommandNoMultisite(objContext, false, args...)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to set zone config")
+ }
+ updated := map[string]interface{}{}
+ err = json.Unmarshal([]byte(updatedBytes), &updated)
+ return updated, err
+}
+
+type ZonegroupPlacementTarget struct {
+ Name string `json:"name"`
+ StorageClasses []string `json:"storage_classes"`
+}
+
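+// ZonePlacementPool is a single entry of the zone's "placement_pools" list. A marshalled
+// entry looks roughly like (illustrative pool names):
+// {"key":"default-placement","val":{"index_pool":"meta:ns.buckets.index",
+//  "data_extra_pool":"meta:ns.buckets.non-ec",
+//  "storage_classes":{"STANDARD":{"data_pool":"data:ns.buckets.data"}},"inline_data":true}}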
+type ZonePlacementPool struct {
+ Key string `json:"key"`
+ Val ZonePlacementPoolVal `json:"val"`
+}
+
+type ZonePlacementPoolVal struct {
+ DataExtraPool string `json:"data_extra_pool"`
+ IndexPool string `json:"index_pool"`
+ StorageClasses map[string]ZonePlacementStorageClass `json:"storage_classes"`
+ InlineData bool `json:"inline_data"`
+}
+
+type ZonePlacementStorageClass struct {
+ DataPool string `json:"data_pool"`
+}
diff --git a/pkg/operator/ceph/object/shared_pools_test.go b/pkg/operator/ceph/object/shared_pools_test.go
new file mode 100644
index 000000000000..33f3e6ed2484
--- /dev/null
+++ b/pkg/operator/ceph/object/shared_pools_test.go
@@ -0,0 +1,1803 @@
+package object
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+
+ cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_validatePoolPlacements(t *testing.T) {
+ type args struct {
+ placements []cephv1.PoolPlacementSpec
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "valid: names unique",
+ args: args{
+ placements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "name1",
+ MetadataPoolName: "", // handled by CRD validation
+ DataPoolName: "", // handled by CRD validation
+ DataNonECPoolName: "", // handled by CRD validation
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ {
+ Name: "name2",
+ MetadataPoolName: "", // handled by CRD validation
+ DataPoolName: "", // handled by CRD validation
+ DataNonECPoolName: "", // handled by CRD validation
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "invalid: duplicate names",
+ args: args{
+ placements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "name",
+ MetadataPoolName: "", // handled by CRD validation
+ DataPoolName: "", // handled by CRD validation
+ DataNonECPoolName: "", // handled by CRD validation
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ {
+ Name: "name",
+ MetadataPoolName: "", // handled by CRD validation
+ DataPoolName: "", // handled by CRD validation
+ DataNonECPoolName: "", // handled by CRD validation
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := validatePoolPlacements(tt.args.placements); (err != nil) != tt.wantErr {
+ t.Errorf("validatePoolPlacements() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func Test_validatePoolPlacementStorageClasses(t *testing.T) {
+ type args struct {
+ scList []cephv1.PlacementStorageClassSpec
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "valid: unique names",
+ args: args{
+ scList: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "STANDARD_IA",
+ DataPoolName: "", // handled by CRD validation
+ },
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "", // handled by CRD validation
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "invalid: cannot override STANDARD",
+ args: args{
+ scList: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "STANDARD",
+ DataPoolName: "", // handled by CRD validation
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "invalid: duplicate names",
+ args: args{
+ scList: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "STANDARD_IA",
+ DataPoolName: "", // handled by CRD validation
+ },
+ {
+ Name: "STANDARD_IA",
+ DataPoolName: "", // handled by CRD validation
+ },
+ },
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := validatePoolPlacementStorageClasses(tt.args.scList); (err != nil) != tt.wantErr {
+ t.Errorf("validatePoolPlacementStorageClasses() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func TestIsNeedToCreateObjectStorePools(t *testing.T) {
+ type args struct {
+ sharedPools cephv1.ObjectSharedPoolsSpec
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "no need: both shared pools set",
+ args: args{
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{},
+ },
+ },
+ want: false,
+ },
+ {
+ name: "no need: default placement is set",
+ args: args{
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "default",
+ MetadataPoolName: "", // handled by CRD validation
+ DataPoolName: "", // handled by CRD validation
+ DataNonECPoolName: "", // handled by CRD validation
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ },
+ want: false,
+ },
+ {
+ name: "need: only meta shared pool set",
+ args: args{
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{},
+ },
+ },
+ want: true,
+ },
+ {
+ name: "need: only data shared pool set",
+ args: args{
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{},
+ },
+ },
+ want: true,
+ },
+ {
+ name: "need: nothing is set",
+ args: args{
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{},
+ },
+ },
+ want: true,
+ },
+ {
+ name: "need: no default placement is set",
+ args: args{
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "fast",
+ MetadataPoolName: "", // handled by CRD validation
+ DataPoolName: "", // handled by CRD validation
+ DataNonECPoolName: "", // handled by CRD validation
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ },
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := IsNeedToCreateObjectStorePools(tt.args.sharedPools); got != tt.want {
+ t.Errorf("IsNeedToCreateObjectStorePools() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_getDefaultMetadataPool(t *testing.T) {
+ type args struct {
+ spec cephv1.ObjectSharedPoolsSpec
+ }
+ tests := []struct {
+ name string
+ args args
+ want string
+ }{
+ {
+ name: "default placement is returned",
+ args: args{
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "some_name",
+ MetadataPoolName: "meta1",
+ DataPoolName: "data1",
+ DataNonECPoolName: "data-non-ec1",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ {
+ Name: defaultPlacementName,
+ MetadataPoolName: "meta2",
+ DataPoolName: "data2",
+ DataNonECPoolName: "data-non-ec2",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ },
+ want: "meta2",
+ },
+ {
+ name: "default placement override shared pool",
+ args: args{
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta-shared",
+ DataPoolName: "data-shared",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "some_name",
+ MetadataPoolName: "meta1",
+ DataPoolName: "data1",
+ DataNonECPoolName: "data-non-ec1",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ {
+ Name: defaultPlacementName,
+ MetadataPoolName: "meta2",
+ DataPoolName: "data2",
+ DataNonECPoolName: "data-non-ec2",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ },
+ want: "meta2",
+ },
+ {
+ name: "shared pool returned if default placement not set",
+ args: args{
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta-shared",
+ DataPoolName: "data-shared",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "some_name",
+ MetadataPoolName: "meta1",
+ DataPoolName: "data1",
+ DataNonECPoolName: "data-non-ec1",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ },
+ want: "meta-shared",
+ },
+ {
+ name: "no pool returned",
+ args: args{
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "data-shared",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "some_name",
+ MetadataPoolName: "meta1",
+ DataPoolName: "data1",
+ DataNonECPoolName: "data-non-ec1",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ },
+ want: "",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := getDefaultMetadataPool(tt.args.spec); got != tt.want {
+ t.Errorf("getDefaultMetadataPool() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_toZonePlacementPool(t *testing.T) {
+ type args struct {
+ spec cephv1.PoolPlacementSpec
+ ns string
+ }
+ tests := []struct {
+ name string
+ args args
+ want ZonePlacementPool
+ }{
+ {
+ name: "map default placement without non-ec to config",
+ args: args{
+ spec: cephv1.PoolPlacementSpec{
+ Name: defaultPlacementName,
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ DataNonECPoolName: "",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "reduced",
+ },
+ },
+ },
+ ns: "ns",
+ },
+ want: ZonePlacementPool{
+ Key: defaultPlacementCephConfigName,
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "meta:ns.data.non-ec",
+ IndexPool: "meta:ns.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: "data:ns.data",
+ },
+ "REDUCED_REDUNDANCY": {
+ DataPool: "reduced:ns.REDUCED_REDUNDANCY",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ {
+ name: "map default placement to config",
+ args: args{
+ spec: cephv1.PoolPlacementSpec{
+ Name: defaultPlacementName,
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ DataNonECPoolName: "repl",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "reduced",
+ },
+ },
+ },
+ ns: "ns",
+ },
+ want: ZonePlacementPool{
+ Key: defaultPlacementCephConfigName,
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "repl:ns.data.non-ec",
+ IndexPool: "meta:ns.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: "data:ns.data",
+ },
+ "REDUCED_REDUNDANCY": {
+ DataPool: "reduced:ns.REDUCED_REDUNDANCY",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ {
+ name: "map default placement without extra SC to config",
+ args: args{
+ spec: cephv1.PoolPlacementSpec{
+ Name: defaultPlacementName,
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ DataNonECPoolName: "repl",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ ns: "ns",
+ },
+ want: ZonePlacementPool{
+ Key: defaultPlacementCephConfigName,
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "repl:ns.data.non-ec",
+ IndexPool: "meta:ns.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: "data:ns.data",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ {
+ name: "map non-default placement without non-ec to config",
+ args: args{
+ spec: cephv1.PoolPlacementSpec{
+ Name: "placement",
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ DataNonECPoolName: "",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "reduced",
+ },
+ },
+ },
+ ns: "ns",
+ },
+ want: ZonePlacementPool{
+ Key: "placement",
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "meta:ns.placement.data.non-ec",
+ IndexPool: "meta:ns.placement.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: "data:ns.placement.data",
+ },
+ "REDUCED_REDUNDANCY": {
+ DataPool: "reduced:ns.REDUCED_REDUNDANCY",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ {
+ name: "map non-default placement to config",
+ args: args{
+ spec: cephv1.PoolPlacementSpec{
+ Name: "placement",
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ DataNonECPoolName: "repl",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "reduced",
+ },
+ },
+ },
+ ns: "ns",
+ },
+ want: ZonePlacementPool{
+ Key: "placement",
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "repl:ns.placement.data.non-ec",
+ IndexPool: "meta:ns.placement.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: "data:ns.placement.data",
+ },
+ "REDUCED_REDUNDANCY": {
+ DataPool: "reduced:ns.REDUCED_REDUNDANCY",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert.Equal(t, tt.want, toZonePlacementPool(tt.args.spec, tt.args.ns))
+ })
+ }
+}
+
+func Test_toZonePlacementPools(t *testing.T) {
+ type args struct {
+ spec cephv1.ObjectSharedPoolsSpec
+ ns string
+ }
+ tests := []struct {
+ name string
+ args args
+ want map[string]ZonePlacementPool
+ }{
+ {
+ name: "backward compatible with prev shared pools",
+ args: args{
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ },
+ ns: "rgw-instance",
+ },
+ want: map[string]ZonePlacementPool{
+ defaultPlacementCephConfigName: {
+ Key: defaultPlacementCephConfigName,
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "meta:rgw-instance.buckets.non-ec",
+ IndexPool: "meta:rgw-instance.buckets.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ "STANDARD": {
+ DataPool: "data:rgw-instance.buckets.data",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ },
+ {
+ name: "default placement overrides shared pools",
+ args: args{
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: defaultPlacementName,
+ MetadataPoolName: "meta1",
+ DataPoolName: "data1",
+ DataNonECPoolName: "data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "reduced",
+ },
+ },
+ },
+ },
+ },
+ ns: "rgw-instance",
+ },
+ want: map[string]ZonePlacementPool{
+ defaultPlacementCephConfigName: {
+ Key: defaultPlacementCephConfigName,
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "data-non-ec:rgw-instance.data.non-ec",
+ IndexPool: "meta1:rgw-instance.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: "data1:rgw-instance.data",
+ },
+ "REDUCED_REDUNDANCY": {
+ DataPool: "reduced:rgw-instance.REDUCED_REDUNDANCY",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ },
+ {
+ name: "no default set",
+ args: args{
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "meta1",
+ DataPoolName: "data1",
+ DataNonECPoolName: "data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "reduced",
+ },
+ },
+ },
+ },
+ },
+ ns: "rgw-instance",
+ },
+ want: map[string]ZonePlacementPool{
+ "placement": {
+ Key: "placement",
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "data-non-ec:rgw-instance.placement.data.non-ec",
+ IndexPool: "meta1:rgw-instance.placement.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: "data1:rgw-instance.placement.data",
+ },
+ "REDUCED_REDUNDANCY": {
+ DataPool: "reduced:rgw-instance.REDUCED_REDUNDANCY",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ },
+ {
+ name: "default shared and placement",
+ args: args{
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "meta1",
+ DataPoolName: "data1",
+ DataNonECPoolName: "data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "reduced",
+ },
+ },
+ },
+ },
+ },
+ ns: "rgw-instance",
+ },
+ want: map[string]ZonePlacementPool{
+ defaultPlacementCephConfigName: {
+ Key: defaultPlacementCephConfigName,
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "meta:rgw-instance.buckets.non-ec",
+ IndexPool: "meta:rgw-instance.buckets.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ "STANDARD": {
+ DataPool: "data:rgw-instance.buckets.data",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ "placement": {
+ Key: "placement",
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "data-non-ec:rgw-instance.placement.data.non-ec",
+ IndexPool: "meta1:rgw-instance.placement.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: "data1:rgw-instance.placement.data",
+ },
+ "REDUCED_REDUNDANCY": {
+ DataPool: "reduced:rgw-instance.REDUCED_REDUNDANCY",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert.Equal(t, tt.want, toZonePlacementPools(tt.args.spec, tt.args.ns))
+ })
+ }
+}
+
+func Test_adjustZoneDefaultPools(t *testing.T) {
+ type args struct {
+ beforeJSON string
+ spec cephv1.ObjectSharedPoolsSpec
+ }
+ tests := []struct {
+ name string
+ args args
+ wantJSON string
+ wantChanged bool
+ wantErr bool
+ }{
+ {
+ name: "nothing changed if default shared pool not set",
+ args: args{
+ beforeJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "domain_root": "DomainRoot",
+ "control_pool": "ControlPool",
+ "gc_pool": "GcPool",
+ "lc_pool": "LcPool",
+ "log_pool": "LogPool",
+ "intent_log_pool": "IntentLogPool",
+ "usage_log_pool": "UsageLogPool",
+ "roles_pool": "RolesPool",
+ "reshard_pool": "ReshardPool",
+ "user_keys_pool": "UserKeysPool",
+ "user_email_pool": "UserEmailPool",
+ "user_swift_pool": "UserSwiftPool",
+ "user_uid_pool": "UserUIDPool",
+ "otp_pool": "OtpPool",
+ "notif_pool": "NotifPool",
+ "system_key": {
+ "access_key": "AccessKey",
+ "secret_key": "SecretKey"
+ },
+ "placement_pools": [],
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd"
+}`,
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "non-default",
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ },
+ },
+ },
+ },
+ wantJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "domain_root": "DomainRoot",
+ "control_pool": "ControlPool",
+ "gc_pool": "GcPool",
+ "lc_pool": "LcPool",
+ "log_pool": "LogPool",
+ "intent_log_pool": "IntentLogPool",
+ "usage_log_pool": "UsageLogPool",
+ "roles_pool": "RolesPool",
+ "reshard_pool": "ReshardPool",
+ "user_keys_pool": "UserKeysPool",
+ "user_email_pool": "UserEmailPool",
+ "user_swift_pool": "UserSwiftPool",
+ "user_uid_pool": "UserUIDPool",
+ "otp_pool": "OtpPool",
+ "notif_pool": "NotifPool",
+ "system_key": {
+ "access_key": "AccessKey",
+ "secret_key": "SecretKey"
+ },
+ "placement_pools": [],
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd"
+}`,
+ wantChanged: false,
+ wantErr: false,
+ },
+ {
+ name: "shared pool set",
+ args: args{
+ beforeJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "domain_root": "DomainRoot",
+ "control_pool": "ControlPool",
+ "gc_pool": "GcPool",
+ "lc_pool": "LcPool",
+ "log_pool": "LogPool",
+ "intent_log_pool": "IntentLogPool",
+ "usage_log_pool": "UsageLogPool",
+ "roles_pool": "RolesPool",
+ "reshard_pool": "ReshardPool",
+ "user_keys_pool": "UserKeysPool",
+ "user_email_pool": "UserEmailPool",
+ "user_swift_pool": "UserSwiftPool",
+ "user_uid_pool": "UserUIDPool",
+ "otp_pool": "OtpPool",
+ "notif_pool": "NotifPool",
+ "system_key": {
+ "access_key": "AccessKey",
+ "secret_key": "SecretKey"
+ },
+ "placement_pools": [],
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd"
+}`,
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta-pool",
+ DataPoolName: "data-pool",
+ PreserveRadosNamespaceDataOnDelete: false,
+ },
+ },
+ wantJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "domain_root": "meta-pool:test.meta.root",
+ "control_pool": "meta-pool:test.control",
+ "gc_pool": "meta-pool:test.log.gc",
+ "lc_pool": "meta-pool:test.log.lc",
+ "log_pool": "meta-pool:test.log",
+ "intent_log_pool": "meta-pool:test.log.intent",
+ "usage_log_pool": "meta-pool:test.log.usage",
+ "roles_pool": "meta-pool:test.meta.roles",
+ "reshard_pool": "meta-pool:test.log.reshard",
+ "user_keys_pool": "meta-pool:test.meta.users.keys",
+ "user_email_pool": "meta-pool:test.meta.users.email",
+ "user_swift_pool": "meta-pool:test.meta.users.swift",
+ "user_uid_pool": "meta-pool:test.meta.users.uid",
+ "otp_pool": "meta-pool:test.otp",
+ "notif_pool": "meta-pool:test.log.notif",
+ "system_key": {
+ "access_key": "AccessKey",
+ "secret_key": "SecretKey"
+ },
+ "placement_pools": [],
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd"
+}`,
+ wantChanged: true,
+ wantErr: false,
+ },
+ {
+ name: "config equals to spec: no changes needed",
+ args: args{
+ beforeJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "domain_root": "meta-pool:test.meta.root",
+ "control_pool": "meta-pool:test.control",
+ "gc_pool": "meta-pool:test.log.gc",
+ "lc_pool": "meta-pool:test.log.lc",
+ "log_pool": "meta-pool:test.log",
+ "intent_log_pool": "meta-pool:test.log.intent",
+ "usage_log_pool": "meta-pool:test.log.usage",
+ "roles_pool": "meta-pool:test.meta.roles",
+ "reshard_pool": "meta-pool:test.log.reshard",
+ "user_keys_pool": "meta-pool:test.meta.users.keys",
+ "user_email_pool": "meta-pool:test.meta.users.email",
+ "user_swift_pool": "meta-pool:test.meta.users.swift",
+ "user_uid_pool": "meta-pool:test.meta.users.uid",
+ "otp_pool": "meta-pool:test.otp",
+ "notif_pool": "meta-pool:test.log.notif",
+ "system_key": {
+ "access_key": "AccessKey",
+ "secret_key": "SecretKey"
+ },
+ "placement_pools": [],
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd"
+}`,
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta-pool",
+ DataPoolName: "data-pool",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{},
+ },
+ },
+ wantJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "domain_root": "meta-pool:test.meta.root",
+ "control_pool": "meta-pool:test.control",
+ "gc_pool": "meta-pool:test.log.gc",
+ "lc_pool": "meta-pool:test.log.lc",
+ "log_pool": "meta-pool:test.log",
+ "intent_log_pool": "meta-pool:test.log.intent",
+ "usage_log_pool": "meta-pool:test.log.usage",
+ "roles_pool": "meta-pool:test.meta.roles",
+ "reshard_pool": "meta-pool:test.log.reshard",
+ "user_keys_pool": "meta-pool:test.meta.users.keys",
+ "user_email_pool": "meta-pool:test.meta.users.email",
+ "user_swift_pool": "meta-pool:test.meta.users.swift",
+ "user_uid_pool": "meta-pool:test.meta.users.uid",
+ "otp_pool": "meta-pool:test.otp",
+ "notif_pool": "meta-pool:test.log.notif",
+ "system_key": {
+ "access_key": "AccessKey",
+ "secret_key": "SecretKey"
+ },
+ "placement_pools": [],
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd"
+}
+`,
+ wantChanged: false,
+ wantErr: false,
+ },
+ {
+ name: "default placement pool overrides shared pool",
+ args: args{
+ beforeJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "domain_root": "DomainRoot",
+ "control_pool": "ControlPool",
+ "gc_pool": "GcPool",
+ "lc_pool": "LcPool",
+ "log_pool": "LogPool",
+ "intent_log_pool": "IntentLogPool",
+ "usage_log_pool": "UsageLogPool",
+ "roles_pool": "RolesPool",
+ "reshard_pool": "ReshardPool",
+ "user_keys_pool": "UserKeysPool",
+ "user_email_pool": "UserEmailPool",
+ "user_swift_pool": "UserSwiftPool",
+ "user_uid_pool": "UserUIDPool",
+ "otp_pool": "OtpPool",
+ "notif_pool": "NotifPool",
+ "system_key": {
+ "access_key": "AccessKey",
+ "secret_key": "SecretKey"
+ },
+ "placement_pools": [],
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd"
+}`,
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "shared-meta-pool",
+ DataPoolName: "shared-data-pool",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: defaultPlacementName,
+ MetadataPoolName: "meta-pool",
+ DataPoolName: "data-pool",
+ },
+ },
+ },
+ },
+ wantJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "domain_root": "meta-pool:test.meta.root",
+ "control_pool": "meta-pool:test.control",
+ "gc_pool": "meta-pool:test.log.gc",
+ "lc_pool": "meta-pool:test.log.lc",
+ "log_pool": "meta-pool:test.log",
+ "intent_log_pool": "meta-pool:test.log.intent",
+ "usage_log_pool": "meta-pool:test.log.usage",
+ "roles_pool": "meta-pool:test.meta.roles",
+ "reshard_pool": "meta-pool:test.log.reshard",
+ "user_keys_pool": "meta-pool:test.meta.users.keys",
+ "user_email_pool": "meta-pool:test.meta.users.email",
+ "user_swift_pool": "meta-pool:test.meta.users.swift",
+ "user_uid_pool": "meta-pool:test.meta.users.uid",
+ "otp_pool": "meta-pool:test.otp",
+ "notif_pool": "meta-pool:test.log.notif",
+ "system_key": {
+ "access_key": "AccessKey",
+ "secret_key": "SecretKey"
+ },
+ "placement_pools": [],
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd"
+}`,
+ wantChanged: true,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ srcZone := map[string]interface{}{}
+ err := json.Unmarshal([]byte(tt.args.beforeJSON), &srcZone)
+ assert.NoError(t, err)
+ changedZone, err := adjustZoneDefaultPools(srcZone, tt.args.spec)
+
+ // check that source was not modified
+ orig := map[string]interface{}{}
+ jErr := json.Unmarshal([]byte(tt.args.beforeJSON), &orig)
+ assert.NoError(t, jErr)
+ assert.EqualValues(t, orig, srcZone, "src was not modified")
+
+ if tt.wantErr {
+ assert.Error(t, err)
+ return
+ } else {
+ assert.NoError(t, err)
+ }
+ assert.Equal(t, tt.wantChanged, !reflect.DeepEqual(srcZone, changedZone))
+ bytes, err := json.Marshal(&changedZone)
+ assert.NoError(t, err)
+ assert.JSONEq(t, tt.wantJSON, string(bytes))
+ })
+ }
+}
+
+func Test_adjustZonePlacementPools(t *testing.T) {
+ type args struct {
+ beforeJSON string
+ spec cephv1.ObjectSharedPoolsSpec
+ }
+ tests := []struct {
+ name string
+ args args
+ wantJSON string
+ wantChanged bool
+ wantErr bool
+ }{
+ {
+ name: "no changes: shared spec not set",
+ args: args{
+ beforeJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 0,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ spec: cephv1.ObjectSharedPoolsSpec{},
+ },
+ wantJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 0,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ wantChanged: false,
+ wantErr: false,
+ },
+ {
+ name: "no changes: spec equal to config",
+ args: args{
+ beforeJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "meta-pool:test.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "data-pool:test.buckets.data"
+ }
+ },
+ "data_extra_pool": "meta-pool:test.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta-pool",
+ DataPoolName: "data-pool",
+ },
+ },
+ wantJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "meta-pool:test.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "data-pool:test.buckets.data"
+ }
+ },
+ "data_extra_pool": "meta-pool:test.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ wantChanged: false,
+ wantErr: false,
+ },
+ {
+ name: "default placement is preserved when non-default placement added",
+ args: args{
+ beforeJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{{
+ Name: "fast",
+ MetadataPoolName: "fast-meta",
+ DataPoolName: "fast-data",
+ DataNonECPoolName: "fast-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "reduced",
+ },
+ },
+ }},
+ },
+ },
+ wantJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ },
+ {
+ "key": "fast",
+ "val": {
+ "index_pool": "fast-meta:test.fast.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "fast-data:test.fast.data"
+ },
+ "REDUCED_REDUNDANCY": {
+ "data_pool": "reduced:test.REDUCED_REDUNDANCY"
+ }
+ },
+ "data_extra_pool": "fast-non-ec:test.fast.data.non-ec",
+ "inline_data": true
+ }
+ }
+
+ ]
+}`,
+ wantChanged: true,
+ wantErr: false,
+ },
+ {
+ name: "delete placement",
+ args: args{
+ beforeJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ },
+ {
+ "key": "fast",
+ "val": {
+ "index_pool": "fast-meta:test.fast.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "fast-data:test.fast.data"
+ }
+ },
+ "data_extra_pool": "fast-non-ec:test.fast.data.non-ec",
+ "index_type": 0,
+ "inline_data": true
+ }
+ },
+ {
+ "key": "slow",
+ "val": {
+ "index_pool": "slow-meta:test.slow.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "slow-data:test.slow.data"
+ }
+ },
+ "data_extra_pool": "slow-non-ec:test.slow.data.non-ec",
+ "index_type": 0,
+ "inline_data": false
+ }
+ }
+ ]
+}`,
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "slow",
+ MetadataPoolName: "slow-meta",
+ DataPoolName: "slow-data",
+ DataNonECPoolName: "slow-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ },
+ wantJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ },
+ {
+ "key": "slow",
+ "val": {
+ "index_pool": "slow-meta:test.slow.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "slow-data:test.slow.data"
+ }
+ },
+ "data_extra_pool": "slow-non-ec:test.slow.data.non-ec",
+ "index_type": 0,
+ "inline_data": false
+ }
+ }
+ ]
+}`,
+ wantChanged: true,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ srcZone := map[string]interface{}{}
+ err := json.Unmarshal([]byte(tt.args.beforeJSON), &srcZone)
+ assert.NoError(t, err)
+ changedZone, err := adjustZonePlacementPools(srcZone, tt.args.spec)
+ // check that source zone was not modified:
+ orig := map[string]interface{}{}
+ jErr := json.Unmarshal([]byte(tt.args.beforeJSON), &orig)
+ assert.NoError(t, jErr)
+ assert.EqualValues(t, srcZone, orig, "source obj was not modified")
+
+ if tt.wantErr {
+ assert.Error(t, err)
+ return
+ } else {
+ assert.NoError(t, err)
+ }
+ bytes, err := json.Marshal(&changedZone)
+ assert.NoError(t, err)
+ assert.JSONEq(t, tt.wantJSON, string(bytes))
+
+ assert.EqualValues(t, tt.wantChanged, !reflect.DeepEqual(srcZone, changedZone))
+ })
+ }
+}
+
+func Test_adjustZoneGroupPlacementTargets(t *testing.T) {
+ type args struct {
+ zone string
+ groupBefore string
+ }
+ tests := []struct {
+ name string
+ args args
+ wantGroup string
+ wantChanged bool
+ wantErr bool
+ }{
+ {
+ name: "nothing changed",
+ args: args{
+ groupBefore: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ zone: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ },
+ wantGroup: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ wantChanged: false,
+ wantErr: false,
+ },
+ {
+ name: "default changed",
+ args: args{
+ groupBefore: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "some-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ zone: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ },
+ wantGroup: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ wantChanged: true,
+ wantErr: false,
+ },
+ {
+ name: "storage class added",
+ args: args{
+ groupBefore: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ zone: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ },
+ "REDUCED_REDUNDANCY": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ },
+ wantGroup: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "REDUCED_REDUNDANCY","STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ wantChanged: true,
+ wantErr: false,
+ },
+ {
+ name: "placement added",
+ args: args{
+ groupBefore: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ zone: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ },
+ "REDUCED_REDUNDANCY": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec"
+ }
+ },
+ {
+ "key": "slow",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec"
+ }
+ }
+ ]
+}`,
+ },
+ wantGroup: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "REDUCED_REDUNDANCY","STANDARD"
+ ]
+ },
+ {
+ "name": "slow",
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ wantChanged: true,
+ wantErr: false,
+ },
+ {
+ name: "placement and sc removed",
+ args: args{
+ groupBefore: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "REDUCED_REDUNDANCY","STANDARD"
+ ]
+ },
+ {
+ "name": "slow",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ zone: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec"
+ }
+ }
+ ]
+}`,
+ },
+ wantGroup: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ wantChanged: true,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ zj := map[string]interface{}{}
+ err := json.Unmarshal([]byte(tt.args.zone), &zj)
+ assert.NoError(t, err)
+ srcGroup := map[string]interface{}{}
+ err = json.Unmarshal([]byte(tt.args.groupBefore), &srcGroup)
+ assert.NoError(t, err)
+ changedGroup, err := adjustZoneGroupPlacementTargets(srcGroup, zj)
+
+ orig := map[string]interface{}{}
+ jErr := json.Unmarshal([]byte(tt.args.groupBefore), &orig)
+ assert.NoError(t, jErr)
+ assert.EqualValues(t, orig, srcGroup, "src was not modified")
+
+ if tt.wantErr {
+ assert.Error(t, err)
+ return
+ } else {
+ assert.NoError(t, err)
+ }
+ assert.Equal(t, tt.wantChanged, !reflect.DeepEqual(srcGroup, changedGroup))
+ bytes, err := json.Marshal(changedGroup)
+ assert.NoError(t, err)
+ assert.JSONEq(t, tt.wantGroup, string(bytes))
+ })
+ }
+}
+
+func Test_createPlacementTargetsFromZonePoolPlacements(t *testing.T) {
+ type args struct {
+ zone string
+ }
+ tests := []struct {
+ name string
+ args args
+ want map[string]ZonegroupPlacementTarget
+ wantErr bool
+ }{
+ {
+ name: "",
+ args: args{
+ zone: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ },
+ "REDUCED_REDUNDANCY": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ },
+ {
+ "key": "slow",
+ "val": {
+ "index_pool": "slow-meta:test.slow.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "slow-data:test.slow.data"
+ }
+ },
+ "data_extra_pool": "slow-non-ec:test.slow.data.non-ec",
+ "index_type": 0,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ },
+ want: map[string]ZonegroupPlacementTarget{
+ "default-placement": {
+ Name: "default-placement",
+ StorageClasses: []string{"REDUCED_REDUNDANCY", "STANDARD"},
+ },
+ "slow": {
+ Name: "slow",
+ StorageClasses: []string{"STANDARD"},
+ },
+ },
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ zo := map[string]interface{}{}
+ _ = json.Unmarshal([]byte(tt.args.zone), &zo)
+ got, err := createPlacementTargetsFromZonePoolPlacements(zo)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("createPlacementTargetsFromZonePoolPlacements() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("createPlacementTargetsFromZonePoolPlacements() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/pkg/operator/ceph/object/zone/controller.go b/pkg/operator/ceph/object/zone/controller.go
index 8d787b06340d..e809caaac55e 100644
--- a/pkg/operator/ceph/object/zone/controller.go
+++ b/pkg/operator/ceph/object/zone/controller.go
@@ -289,16 +289,22 @@ func (r *ReconcileObjectZone) createorUpdateCephZone(zone *cephv1.CephObjectZone
func (r *ReconcileObjectZone) createPoolsAndZone(objContext *object.Context, zone *cephv1.CephObjectZone, realmName string, zoneIsMaster bool) error {
// create pools for zone
logger.Debugf("creating pools ceph zone %q", zone.Name)
+ err := object.ValidateObjectStorePoolsConfig(zone.Spec.MetadataPool, zone.Spec.DataPool, zone.Spec.SharedPools)
+ if err != nil {
+ return fmt.Errorf("invalid zone pools config: %w", err)
+ }
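+	// Create the store's own (non-shared) pools only when neither shared pools nor a default pool placement cover them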
+ if object.IsNeedToCreateObjectStorePools(zone.Spec.SharedPools) {
+ err = object.CreateObjectStorePools(objContext, r.clusterSpec, zone.Spec.MetadataPool, zone.Spec.DataPool)
+ if err != nil {
+ return fmt.Errorf("unable to create pools for zone: %w", err)
+ }
+ logger.Debugf("created pools ceph zone %q", zone.Name)
+ }
+
realmArg := fmt.Sprintf("--rgw-realm=%s", realmName)
zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", zone.Spec.ZoneGroup)
zoneArg := fmt.Sprintf("--rgw-zone=%s", zone.Name)
- err := object.ConfigurePools(objContext, r.clusterSpec, zone.Spec.MetadataPool, zone.Spec.DataPool, zone.Spec.SharedPools)
- if err != nil {
- return errors.Wrapf(err, "failed to create pools for zone %v", zone.Name)
- }
- logger.Debugf("created pools ceph zone %q", zone.Name)
-
accessKeyArg, secretKeyArg, err := object.GetRealmKeyArgs(r.opManagerContext, r.context, realmName, zone.Namespace)
if err != nil {
return errors.Wrap(err, "failed to get keys for realm")
@@ -326,6 +332,12 @@ func (r *ReconcileObjectZone) createPoolsAndZone(objContext *object.Context, zon
return errors.Wrapf(err, "failed to configure rados namespaces for zone")
}
+ // Commit rgw zone config changes
+ err = object.CommitConfigChanges(objContext)
+ if err != nil {
+ return errors.Wrapf(err, "failed to commit zone config changes")
+ }
+
return nil
}
@@ -412,6 +424,7 @@ func (r *ReconcileObjectZone) updateStatus(observedGeneration int64, name types.
}
logger.Debugf("object zone %q status updated to %q", name, status)
}
+
func (r *ReconcileObjectZone) deleteZone(objContext *object.Context) error {
realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
// zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup)
@@ -481,6 +494,7 @@ func decodePoolPrefixfromZone(data string) (string, error) {
s := strings.Split(domain.DomainRoot, ".rgw.")
return s[0], err
}
+
func (r *ReconcileObjectZone) deleteCephObjectZone(zone *cephv1.CephObjectZone, realmName string) (reconcile.Result, error) {
logger.Debugf("deleting zone CR %q", zone.Name)
objContext := object.NewContext(r.context, r.clusterInfo, zone.Name)