diff --git a/cmd/cstor-pool-mgmt/controller/common/common.go b/cmd/cstor-pool-mgmt/controller/common/common.go
index 432fc3ef91..6e8bbad345 100644
--- a/cmd/cstor-pool-mgmt/controller/common/common.go
+++ b/cmd/cstor-pool-mgmt/controller/common/common.go
@@ -97,6 +97,10 @@ const (
 	MessageResourceAlreadyPresent EventReason = "Resource already present"
 	// MessageImproperPoolStatus holds message for corresponding failed validate resource.
 	MessageImproperPoolStatus EventReason = "Improper pool status"
+	// PoolROThreshold holds the event reason for the pool read-only state
+	PoolROThreshold EventReason = "PoolReadOnlyThreshold"
+	// MessagePoolROThreshold holds the descriptive message for PoolROThreshold
+	MessagePoolROThreshold EventReason = "Pool storage has reached the read-only threshold. Pool expansion is required to make its replicas RW"
 )
 
 // Periodic interval duration.
diff --git a/cmd/cstor-pool-mgmt/controller/pool-controller/handler.go b/cmd/cstor-pool-mgmt/controller/pool-controller/handler.go
index 593d6e3e4e..3575897508 100644
--- a/cmd/cstor-pool-mgmt/controller/pool-controller/handler.go
+++ b/cmd/cstor-pool-mgmt/controller/pool-controller/handler.go
@@ -35,6 +35,7 @@ import (
 	"github.com/openebs/maya/pkg/util"
 	corev1 "k8s.io/api/core/v1"
 	k8serror "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/client-go/tools/cache"
@@ -132,6 +133,7 @@ func (c *CStorPoolController) syncHandler(key string, operation common.QueueOper
 		return err
 	}
 	// Synchronize cstor pool used and free capacity fields on CSP object.
+	// Also verify and handle the pool read-only threshold limit.
 	// Any kind of sync activity should be done from here.
 	// ToDo: Move status sync (of csp) here from cStorPoolEventHandler function.
 	// ToDo: Instead of having statusSync, capacitySync we can make it generic resource sync which syncs all the
@@ -206,7 +208,10 @@ func (c *CStorPoolController) cStorPoolEventHandler(operation common.QueueOperat
 			return status, err
 		}
 		klog.V(4).Infof("Synchronizing cStor pool status for pool %s", cStorPoolGot.ObjectMeta.Name)
-		status, err := c.getPoolStatus(cStorPoolGot)
+		status, readOnly, err := c.getPoolStatus(cStorPoolGot)
+		if err == nil {
+			cStorPoolGot.Status.ReadOnly = readOnly
+		}
 		return status, err
 	}
 	klog.Errorf("ignored event '%s' for cstor pool '%s'", string(operation), string(cStorPoolGot.ObjectMeta.Name))
@@ -394,14 +399,14 @@ func (c *CStorPoolController) cStorPoolDestroyEventHandler(cStorPoolGot *apis.CS
 }
 
 // getPoolStatus is a wrapper that fetches the status of cstor pool.
-func (c *CStorPoolController) getPoolStatus(cStorPoolGot *apis.CStorPool) (string, error) {
-	poolStatus, err := pool.Status(string(pool.PoolPrefix) + string(cStorPoolGot.ObjectMeta.UID))
+func (c *CStorPoolController) getPoolStatus(cStorPoolGot *apis.CStorPool) (string, bool, error) {
+	poolStatus, readOnly, err := pool.Status(string(pool.PoolPrefix) + string(cStorPoolGot.ObjectMeta.UID))
 	if err != nil {
 		// ToDO : Put error in event recorder
 		c.recorder.Event(cStorPoolGot, corev1.EventTypeWarning, string(common.FailureStatusSync), string(common.MessageResourceFailStatusSync))
-		return "", err
+		return "", false, err
 	}
-	return poolStatus, nil
+	return poolStatus, readOnly, nil
 }
 
 // getPoolResource returns object corresponding to the resource key
@@ -562,7 +567,74 @@ func (c *CStorPoolController) syncCsp(cStorPool *apis.CStorPool) {
 		c.recorder.Event(cStorPool, corev1.EventTypeWarning, string(common.FailureCapacitySync), string(common.MessageResourceFailCapacitySync))
 	} else {
 		cStorPool.Status.Capacity = *capacity
+		c.updateROMode(cStorPool)
+	}
+}
+
+// updateROMode updates the pool read-only mode and the CSP status
+func (c *CStorPoolController) updateROMode(cStorPool *apis.CStorPool) {
+	// Note: the handler has already updated the cStorPool status before this call,
+	// so it can differ from the etcd version; the checks below use the latest values.
+	capacity := cStorPool.Status.Capacity
+	rOThresholdLimit := cStorPool.Spec.PoolSpec.ROThresholdLimit
+
+	qn, err := convertToBytes([]string{capacity.Total, capacity.Free, capacity.Used})
+	if err != nil {
+		klog.Errorf("Failed to parse capacity: err=%s", err)
+		return
+	}
+
+	total, _, used := qn[0], qn[1], qn[2]
+	usedCapacity := (used * 100) / total
+
+	if (int(usedCapacity) >= rOThresholdLimit) &&
+		(rOThresholdLimit != 0 &&
+			rOThresholdLimit != 100) {
+		if !cStorPool.Status.ReadOnly {
+			if err = pool.SetPoolRDMode(cStorPool, true); err != nil {
+				klog.Errorf("Failed to set pool readOnly mode : %v", err)
+			} else {
+				cStorPool.Status.ReadOnly = true
+				c.recorder.Event(cStorPool,
+					corev1.EventTypeWarning,
+					string(common.PoolROThreshold),
+					string(common.MessagePoolROThreshold),
+				)
+			}
+		}
+	} else {
+		if cStorPool.Status.ReadOnly {
+			if err = pool.SetPoolRDMode(cStorPool, false); err != nil {
+				klog.Errorf("Failed to unset pool readOnly mode : %v", err)
+			} else {
+				cStorPool.Status.ReadOnly = false
+			}
+		}
+	}
+	return
+}
+
+func convertToBytes(a []string) (number []int64, err error) {
+	if len(a) == 0 {
+		err = errors.New("empty input")
+		return
+	}
+
+	defer func() {
+		if r := recover(); r != nil {
+			err = errors.New("unable to parse")
+		}
+	}()
+
+	parser := func(s string) int64 {
+		d := resource.MustParse(s + "i")
+		return d.Value()
+	}
+
+	for _, v := range a {
+		number = append(number, parser(v))
 	}
+	return
 }
 
 func (c *CStorPoolController) getDeviceIDs(csp *apis.CStorPool) ([]string, error) {
diff --git a/cmd/cstor-pool-mgmt/pool/pool.go b/cmd/cstor-pool-mgmt/pool/pool.go
index c8f36ca853..dac185ed7b 100644
--- a/cmd/cstor-pool-mgmt/pool/pool.go
+++ b/cmd/cstor-pool-mgmt/pool/pool.go
@@ -390,62 +390,67 @@ func Capacity(poolName string) (*apis.CStorPoolCapacityAttr, error) {
 }
 
 // PoolStatus finds the status of the pool.
-// The ouptut of command(`zpool status `) executed is as follows:
+// The output of the executed command (`zpool get -Hp -ovalue health,io.openebs:readonly`) is as follows:
 /*
-	pool: cstor-530c9c4f-e0df-11e8-94a8-42010a80013b
-	state: ONLINE
-	scan: none requested
-	config:
-
-	NAME                                         STATE   READ  WRITE  CKSUM
-	cstor-530c9c4f-e0df-11e8-94a8-42010a80013b   ONLINE     0      0      0
-	scsi-0Google_PersistentDisk_ashu-disk2       ONLINE     0      0      0
-
-	errors: No known data errors
+root@cstor-pool-1dvj-854db8dc56-prblp:/# zpool get -Hp -ovalue health,io.openebs:readonly cstor-3cbec7b9-578d-11ea-b66e-42010a9a0080
+ONLINE
+off
 */
 // The output is then parsed by poolStatusOutputParser function to get the status of the pool
-func Status(poolName string) (string, error) {
+func Status(poolName string) (string, bool, error) {
 	var poolStatus string
-	statusPoolStr := []string{"status", poolName}
+	var readOnly bool
+
+	statusPoolStr := []string{"get", "-Hp", "-ovalue", "health,io.openebs:readonly", poolName}
 	stdoutStderr, err := RunnerVar.RunCombinedOutput(zpool.PoolOperator, statusPoolStr...)
 	if err != nil {
 		klog.Errorf("Unable to get pool status: %v", string(stdoutStderr))
-		return "", err
-	}
-	poolStatus = poolStatusOutputParser(string(stdoutStderr))
-	if poolStatus == ZpoolStatusDegraded {
-		return string(apis.CStorPoolStatusDegraded), nil
-	} else if poolStatus == ZpoolStatusFaulted {
-		return string(apis.CStorPoolStatusOffline), nil
-	} else if poolStatus == ZpoolStatusOffline {
-		return string(apis.CStorPoolStatusOffline), nil
-	} else if poolStatus == ZpoolStatusOnline {
-		return string(apis.CStorPoolStatusOnline), nil
-	} else if poolStatus == ZpoolStatusRemoved {
-		return string(apis.CStorPoolStatusDegraded), nil
-	} else if poolStatus == ZpoolStatusUnavail {
-		return string(apis.CStorPoolStatusOffline), nil
-	} else {
-		return string(apis.CStorPoolStatusError), nil
-	}
+		return "", readOnly, err
+	}
+	readOnly, poolStatus = poolStatusOutputParser(string(stdoutStderr))
+
+	poolStatus = func(s string) string {
+		switch s {
+		case ZpoolStatusDegraded:
+			return string(apis.CStorPoolStatusDegraded)
+		case ZpoolStatusFaulted:
+			return string(apis.CStorPoolStatusOffline)
+		case ZpoolStatusOffline:
+			return string(apis.CStorPoolStatusOffline)
+		case ZpoolStatusOnline:
+			return string(apis.CStorPoolStatusOnline)
+		case ZpoolStatusRemoved:
+			return string(apis.CStorPoolStatusDegraded)
+		case ZpoolStatusUnavail:
+			return string(apis.CStorPoolStatusError)
+		default:
+			return string(apis.CStorPoolStatusError)
+		}
+	}(poolStatus)
+
+	return poolStatus, readOnly, nil
 }
 
 // poolStatusOutputParser parse output of `zpool status` command to extract the status of the pool.
 // ToDo: Need to find some better way e.g contract for zpool command outputs.
-func poolStatusOutputParser(output string) string {
+func poolStatusOutputParser(output string) (bool, string) {
 	var outputStr []string
 	var poolStatus string
-	if strings.TrimSpace(string(output)) != "" {
-		outputStr = strings.Split(string(output), "\n")
-		if !(len(outputStr) < 2) {
-			poolStatusArr := strings.Split(outputStr[1], ":")
-			if !(len(outputStr) < 2) {
-				poolStatus = strings.TrimSpace(poolStatusArr[1])
-			}
-		}
+	var readOnly bool
+
+	outputStr = strings.Split(string(output), "\n")
+
+	if len(outputStr) != 3 {
+		klog.Errorf("Invalid input='%s' for poolStatusOutputParser", output)
+		return readOnly, poolStatus
+	}
+
+	poolStatus = strings.TrimSpace(string(outputStr[0]))
+	if outputStr[1] == "on" {
+		readOnly = true
 	}
-	return poolStatus
+	return readOnly, poolStatus
 }
 
 // capacityOutputParser parse output of `zpool get` command to extract the capacity of the pool.
@@ -552,3 +557,22 @@ func GetDeviceIDs(csp *apis.CStorPool) ([]string, error) {
 	}
 	return bdDeviceID, nil
 }
+
+// SetPoolRDMode sets/unsets the pool read-only property
+func SetPoolRDMode(csp *apis.CStorPool, isROMode bool) error {
+	mode := "off"
+	if isROMode {
+		mode = "on"
+	}
+
+	cmd := []string{"set",
+		"io.openebs:readonly=" + mode,
+		string(PoolPrefix) + string(csp.ObjectMeta.UID),
+	}
+
+	stdoutStderr, err := RunnerVar.RunCombinedOutput(zpool.PoolOperator, cmd...)
+	if err != nil {
+		return errors.Errorf("Failed to update readOnly mode, out: %v, err: %v", string(stdoutStderr), err)
+	}
+	return nil
+}
diff --git a/cmd/cstor-pool-mgmt/pool/pool_test.go b/cmd/cstor-pool-mgmt/pool/pool_test.go
index bf1c5fef80..f976d7af89 100644
--- a/cmd/cstor-pool-mgmt/pool/pool_test.go
+++ b/cmd/cstor-pool-mgmt/pool/pool_test.go
@@ -62,6 +62,24 @@ func (r TestRunner) RunCombinedOutput(command string, args ...string) ([]byte, e
 		cs = []string{"-test.run=TestLabelClearerProcess", "--"}
 		env = []string{"labelClearErr=nil"}
 		break
+	case "get":
+		if args[1] == "-Hp" {
+			if len(r.expectedError) != 0 {
+				return []byte(r.expectedError), nil
+			}
+			// Create command arguments
+			cs = []string{"-test.run=TestStatusHelperProcess", "--", command}
+			// Set env variables for the 'TestStatusHelperProcess' function which runs as a process.
+			env = []string{"GO_WANT_STATUS_HELPER_PROCESS=1", "StatusType=" + os.Getenv("StatusType")}
+		} else {
+			if len(r.expectedError) != 0 {
+				return []byte(r.expectedError), nil
+			}
+			// Create command arguments
+			cs = []string{"-test.run=TestCapacityHelperProcess", "--", command}
+			// Set env variables for the 'TestCapacityHelperProcess' function which runs as a process.
+			env = []string{"GO_WANT_CAPACITY_HELPER_PROCESS=1"}
+		}
 	case "status":
 		if len(r.expectedError) != 0 {
 			return []byte(r.expectedError), nil
@@ -70,14 +88,6 @@ func (r TestRunner) RunCombinedOutput(command string, args ...string) ([]byte, e
 		cs = []string{"-test.run=TestStatusHelperProcess", "--", command}
 		// Set env varibles for the 'TestStatusHelperProcess' function which runs as a process.
 		env = []string{"GO_WANT_STATUS_HELPER_PROCESS=1", "StatusType=" + os.Getenv("StatusType")}
-	case "get":
-		if len(r.expectedError) != 0 {
-			return []byte(r.expectedError), nil
-		}
-		// Create command arguments
-		cs = []string{"-test.run=TestCapacityHelperProcess", "--", command}
-		// Set env varibles for the 'TestCapacityHelperProcess' function which runs as a process.
-		env = []string{"GO_WANT_CAPACITY_HELPER_PROCESS=1"}
 	case "set":
 		cs = []string{"-test.run=TestSetCachefileProcess", "--"}
 		env = []string{"SetErr=nil"}
@@ -174,67 +184,31 @@ func TestStatusHelperProcess(*testing.T) {
 	// Following constants are different mocked output for `zpool status` command for
 	// different statuses.
 	const (
-		mockedStatusOutputOnline = `pool: cstor-530c9c4f-e0df-11e8-94a8-42010a80013b
- state: ONLINE
- scan: none requested
- config:
-
- NAME STATE READ WRITE CKSUM
- cstor-530c9c4f-e0df-11e8-94a8-42010a80013b ONLINE 0 0 0
- scsi-0Google_PersistentDisk_ashu-disk2 ONLINE 0 0 0
-
- errors: No known data errors`
-		mockedStatusOutputOffline = `pool: cstor-530c9c4f-e0df-11e8-94a8-42010a80013b
- state: OFFLINE
- scan: none requested
- config:
-
- NAME STATE READ WRITE CKSUM
- cstor-530c9c4f-e0df-11e8-94a8-42010a80013b OFFLINE 0 0 0
- scsi-0Google_PersistentDisk_ashu-disk2 OFFLINE 0 0 0
-
- errors: No known data errors`
-		mockedStatusOutputRemoved = `pool: cstor-530c9c4f-e0df-11e8-94a8-42010a80013b
- state: REMOVED
- scan: none requested
- config:
-
- NAME STATE READ WRITE CKSUM
- cstor-530c9c4f-e0df-11e8-94a8-42010a80013b REMOVED 0 0 0
- scsi-0Google_PersistentDisk_ashu-disk2 REMOVED 0 0 0
-
- errors: No known data errors`
-		mockedStatusOutputUnavail = `pool: cstor-530c9c4f-e0df-11e8-94a8-42010a80013b
- state: UNAVAIL
- scan: none requested
- config:
-
- NAME STATE READ WRITE CKSUM
- cstor-530c9c4f-e0df-11e8-94a8-42010a80013b UNAVAIL 0 0 0
- scsi-0Google_PersistentDisk_ashu-disk2 UNAVAIL 0 0 0
-
- errors: No known data errors`
-		mockedStatusOutputFaulted = `pool: cstor-530c9c4f-e0df-11e8-94a8-42010a80013b
- state: FAULTED
- scan: none requested
- config:
-
- NAME STATE READ WRITE CKSUM
- cstor-530c9c4f-e0df-11e8-94a8-42010a80013b FAULTED 0 0 0
- scsi-0Google_PersistentDisk_ashu-disk2 FAULTED 0 0 0
-
- errors: No known data errors`
-		mockedStatusOutputDegraded = `pool: cstor-530c9c4f-e0df-11e8-94a8-42010a80013b
- state: DEGRADED
- scan: none requested
- config:
-
- NAME STATE READ WRITE CKSUM
- cstor-530c9c4f-e0df-11e8-94a8-42010a80013b DEGRADED 0 0 0
- scsi-0Google_PersistentDisk_ashu-disk2 DEGRADED 0 0 0
-
- errors: No known data errors`
+		mockedStatusOutputOnline = `ONLINE
+
+`
+
+		mockedStatusOutputOffline = `OFFLINE
+off
+`
+
+		mockedStatusOutputRemoved = `REMOVED
+on
+`
+
+		mockedStatusOutputUnavail = `UNAVAIL
+
+`
+
+		mockedStatusOutputFaulted = `FAULTED
+off
+`
+
+		mockedStatusOutputDegraded = `DEGRADED
+on
+`
 	)
+
 	if os.Getenv("GO_WANT_STATUS_HELPER_PROCESS") != "1" {
 		return
 	}
@@ -846,36 +820,44 @@ func TestPoolStatus(t *testing.T) {
 		mockedOutputType string
 		// expectedStatus is the status that is expected for the test case.
 		expectedStatus string
+		// expectedRO is the pool readOnly value that is expected for the test case.
+		expectedRO bool
 	}{
 		"#1 OnlinePoolStatus": {
 			poolName:         "cstor-530c9c4f-e0df-11e8-94a8-42010a80013b",
 			mockedOutputType: ZpoolStatusOnline,
 			expectedStatus:   "Healthy",
+			expectedRO:       false,
 		},
 		"#2 OfflinePoolStatus": {
 			poolName:         "cstor-530c9c4f-e0df-11e8-94a8-42010a80013b",
 			mockedOutputType: ZpoolStatusOffline,
 			expectedStatus:   "Offline",
+			expectedRO:       false,
 		},
 		"#3 UnavailPoolStatus": {
 			poolName:         "cstor-530c9c4f-e0df-11e8-94a8-42010a80013b",
 			mockedOutputType: ZpoolStatusUnavail,
-			expectedStatus:   "Offline",
+			expectedStatus:   "Error",
+			expectedRO:       false,
 		},
 		"#4 RemovedPoolStatus": {
 			poolName:         "cstor-530c9c4f-e0df-11e8-94a8-42010a80013b",
 			mockedOutputType: ZpoolStatusRemoved,
 			expectedStatus:   "Degraded",
+			expectedRO:       true,
 		},
 		"#5 FaultedPoolStatus": {
 			poolName:         "cstor-530c9c4f-e0df-11e8-94a8-42010a80013b",
 			mockedOutputType: ZpoolStatusFaulted,
 			expectedStatus:   "Offline",
+			expectedRO:       false,
 		},
 		"#6 DegradedPoolStatus": {
 			poolName:         "cstor-530c9c4f-e0df-11e8-94a8-42010a80013b",
 			mockedOutputType: ZpoolStatusDegraded,
 			expectedStatus:   "Degraded",
+			expectedRO:       true,
 		},
 	}
 	for name, test := range testPoolResource {
@@ -884,13 +866,16 @@
 			// It will help to decide which mocked output should be considered as a std output.
 			os.Setenv("StatusType", test.mockedOutputType)
 			RunnerVar = TestRunner{}
-			gotStatus, err := Status(test.poolName)
+			gotStatus, gotRO, err := Status(test.poolName)
 			if err != nil {
 				t.Fatal("Some error occured in getting pool status:", err)
 			}
 			if test.expectedStatus != gotStatus {
 				t.Errorf("Test case failed as expected status '%s' but got '%s'", test.expectedStatus, gotStatus)
 			}
+			if test.expectedRO != gotRO {
+				t.Errorf("Test case failed as expected readOnly '%v' but got '%v'", test.expectedRO, gotRO)
+			}
 			// Unset the "StatusType" env variable
 			os.Unsetenv("StatusType")
 		})
diff --git a/cmd/maya-apiserver/cstor-operator/spc/handler.go b/cmd/maya-apiserver/cstor-operator/spc/handler.go
index 686700c25d..3a33faa8d5 100644
--- a/cmd/maya-apiserver/cstor-operator/spc/handler.go
+++ b/cmd/maya-apiserver/cstor-operator/spc/handler.go
@@ -339,6 +339,7 @@ var validateFuncList = []validateFunc{
 	validatePoolType,
 	validateDiskType,
 	validateAutoSpcMaxPool,
+	validateROThresholdLimit,
 }
 
 // validatePoolType validates pool type in spc.
@@ -374,6 +375,16 @@ func validateAutoSpcMaxPool(spc *apis.StoragePoolClaim) error {
 	return nil
 }
 
+// validateROThresholdLimit validates the RO threshold limit
+func validateROThresholdLimit(spc *apis.StoragePoolClaim) error {
+	if spc.Spec.PoolSpec.ROThresholdLimit < 0 ||
+		spc.Spec.PoolSpec.ROThresholdLimit > 100 {
+		return errors.Errorf("Invalid pool ROThreshold limit, it should be between 0 and 100")
+	}
+
+	return nil
+}
+
 // getCurrentPoolCount give the current pool count for the given auto provisioned spc.
 func (c *Controller) getCurrentPoolCount(spc *apis.StoragePoolClaim) (int, error) {
 	// Get the current count of provisioned pool for the storagepool claim
diff --git a/cmd/maya-apiserver/cstor-operator/spc/storagepool_create.go b/cmd/maya-apiserver/cstor-operator/spc/storagepool_create.go
index 36faaec18e..1658a22120 100644
--- a/cmd/maya-apiserver/cstor-operator/spc/storagepool_create.go
+++ b/cmd/maya-apiserver/cstor-operator/spc/storagepool_create.go
@@ -72,6 +72,7 @@ func (pc *PoolCreateConfig) getCasPool(spc *apis.StoragePoolClaim) (*apis.CasPoo
 		withPoolCacheFile(spc.Spec.PoolSpec.CacheFile).
 		withAnnotations(spc.Annotations).
 		withMaxPool(spc).
+		WithPoolROThreshold(spc.Spec.PoolSpec.ROThresholdLimit).
 		Build()
 	casPoolWithDisks, err := pc.withDisks(casPool, spc)
 	if err != nil {
@@ -134,6 +135,12 @@ func (cb *CasPoolBuilder) withAnnotations(annotations map[string]string) *CasPoo
 	return cb
 }
 
+// WithPoolROThreshold sets the PoolROThreshold value
+func (cb *CasPoolBuilder) WithPoolROThreshold(poolROThreshold int) *CasPoolBuilder {
+	cb.CasPool.PoolROThreshold = poolROThreshold
+	return cb
+}
+
 // Build returns an instance of cas pool object.
 func (cb *CasPoolBuilder) Build() *apis.CasPool {
 	return cb.CasPool
diff --git a/pkg/apis/openebs.io/v1alpha1/cas_pool.go b/pkg/apis/openebs.io/v1alpha1/cas_pool.go
index bd850c090e..13c6725c39 100644
--- a/pkg/apis/openebs.io/v1alpha1/cas_pool.go
+++ b/pkg/apis/openebs.io/v1alpha1/cas_pool.go
@@ -120,4 +120,7 @@ type CasPool struct {
 	DeviceID []string
 
 	APIBlockDeviceList ndm.BlockDeviceList
+
+	// PoolROThreshold is the threshold limit for pool read-only mode
+	PoolROThreshold int
 }
diff --git a/pkg/apis/openebs.io/v1alpha1/cas_template_keys.go b/pkg/apis/openebs.io/v1alpha1/cas_template_keys.go
index 2004cc8bd7..bd0203abdd 100644
--- a/pkg/apis/openebs.io/v1alpha1/cas_template_keys.go
+++ b/pkg/apis/openebs.io/v1alpha1/cas_template_keys.go
@@ -131,6 +131,8 @@ const (
 	InitPhaseCTP StoragePoolTLPProperty = "phase"
 	// PoolCacheFileCTP is the cache file used in case of imporitng pool
 	PoolCacheFileCTP StoragePoolTLPProperty = "poolCacheFile"
+	// PoolROThresholdLimitCTP is the pool read-only threshold limit
+	PoolROThresholdLimitCTP StoragePoolTLPProperty = "poolROThreshold"
 )
 
 // VolumeTLPProperty is used to define properties that comes
diff --git a/pkg/apis/openebs.io/v1alpha1/cstor_pool.go b/pkg/apis/openebs.io/v1alpha1/cstor_pool.go
index 6f78386cd2..825a7b9a88 100644
--- a/pkg/apis/openebs.io/v1alpha1/cstor_pool.go
+++ b/pkg/apis/openebs.io/v1alpha1/cstor_pool.go
@@ -74,6 +74,8 @@ type CStorPoolAttr struct {
 	OverProvisioning bool `json:"overProvisioning"` //true or false
 	// ThickProvisioning, If true disables OverProvisioning
 	ThickProvisioning bool `json:"thickProvisioning"` // true or false
+	// ROThresholdLimit is the threshold limit (in percent) for pool read-only mode; once ROThresholdLimit% of pool storage is used, the pool and its CVRs become read-only (0 < ROThresholdLimit < 100, default: 100)
+	ROThresholdLimit int `json:"roThresholdLimit"` //optional
 }
 
 // CStorPoolPhase is a typed string for phase field of CStorPool.
@@ -117,10 +119,15 @@ const (
 type CStorPoolStatus struct {
 	Phase    CStorPoolPhase        `json:"phase"`
 	Capacity CStorPoolCapacityAttr `json:"capacity"`
+
 	// LastTransitionTime refers to the time when the phase changes
 	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
-	LastUpdateTime     metav1.Time `json:"lastUpdateTime,omitempty"`
-	Message            string      `json:"message,omitempty"`
+
+	// ReadOnly indicates whether the pool is in read-only mode
+	ReadOnly bool `json:"readOnly"`
+
+	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
+	Message        string      `json:"message,omitempty"`
 }
 
 // CStorPoolCapacityAttr stores the pool capacity related attributes.
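Reviewer note (not part of the patch): the read-only switch in updateROMode reduces to integer arithmetic over the capacity strings that zpool reports. Below is a minimal, self-contained Go sketch of that decision; the capacity values and the 85% limit are invented for illustration, and toBytes mirrors convertToBytes by appending "i" so that resource.MustParse reads a zpool size such as "9.94G" as the binary quantity "9.94Gi".

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// toBytes converts a zpool-style size string ("9.94G") to bytes by turning
// its suffix into a binary one ("9.94Gi") before parsing, as convertToBytes does.
func toBytes(s string) int64 {
	return resource.MustParse(s + "i").Value()
}

func main() {
	total := toBytes("9.94G") // hypothetical pool size
	used := toBytes("8.7G")   // hypothetical used space
	roThresholdLimit := 85    // hypothetical spec.poolSpec.roThresholdLimit

	usedPct := (used * 100) / total // integer percentage, as in updateROMode
	// Limits of 0 and 100 disable the feature, matching the guard in updateROMode.
	if int(usedPct) >= roThresholdLimit && roThresholdLimit != 0 && roThresholdLimit != 100 {
		fmt.Printf("used %d%% >= limit %d%%: zpool set io.openebs:readonly=on\n", usedPct, roThresholdLimit)
	} else {
		fmt.Printf("used %d%% < limit %d%%: pool stays read-write\n", usedPct, roThresholdLimit)
	}
}

With these sample numbers the pool is roughly 87% full, so the sketch prints the read-only branch; dropping used below 85% takes the other branch, which is the same hysteresis-free toggle the controller applies via SetPoolRDMode.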
diff --git a/pkg/install/v1alpha1/cstor_pool.go b/pkg/install/v1alpha1/cstor_pool.go
index bac8d90218..12af8ad2ea 100644
--- a/pkg/install/v1alpha1/cstor_pool.go
+++ b/pkg/install/v1alpha1/cstor_pool.go
@@ -127,14 +127,14 @@ spec:
         {{- jsonpath .JsonResult "{.metadata.uid}" | trim | addTo "putcstorpoolcr.objectUID" .TaskResult | noop -}}
         {{- jsonpath .JsonResult "{.metadata.labels.kubernetes\\.io/hostname}" | trim | addTo "putcstorpoolcr.nodeName" .TaskResult | noop -}}
       task: |-
-        {{- $blockDeviceIdList:= toYaml .Storagepool | fromYaml -}}
+        {{- $storagePool:= toYaml .Storagepool | fromYaml -}}
         apiVersion: openebs.io/v1alpha1
         kind: CStorPool
         metadata:
-          name: {{$blockDeviceIdList.owner}}-{{randAlphaNum 4 |lower }}
+          name: {{$storagePool.owner}}-{{randAlphaNum 4 |lower }}
          labels:
-            openebs.io/storage-pool-claim: {{$blockDeviceIdList.owner}}
-            kubernetes.io/hostname: {{$blockDeviceIdList.nodeName}}
+            openebs.io/storage-pool-claim: {{$storagePool.owner}}
+            kubernetes.io/hostname: {{$storagePool.nodeName}}
             openebs.io/version: {{ .CAST.version }}
             openebs.io/cas-template-name: {{ .CAST.castName }}
             openebs.io/cas-type: cstor
@@ -143,11 +143,11 @@ spec:
             blockOwnerDeletion: true
             controller: true
             kind: StoragePoolClaim
-            name: {{$blockDeviceIdList.owner}}
+            name: {{$storagePool.owner}}
             uid: {{ .TaskResult.getspc.objectUID }}
         spec:
           group:
-          {{- range $k, $v := $blockDeviceIdList.blockDeviceList }}
+          {{- range $k, $v := $storagePool.blockDeviceList }}
            - blockDevice:
              {{- range $ki, $blockDevice := $v.blockDevice }}
              - name: {{$blockDevice.name}}
@@ -156,9 +156,10 @@ spec:
              {{- end }}
           {{- end }}
           poolSpec:
-            poolType: {{$blockDeviceIdList.poolType}}
-            cacheFile: {{$blockDeviceIdList.poolCacheFile}}
+            poolType: {{$storagePool.poolType}}
+            cacheFile: {{$storagePool.poolCacheFile}}
             overProvisioning: false
+            roThresholdLimit: {{$storagePool.poolROThreshold}}
         status:
           phase: Init
 versionDetails:
diff --git a/pkg/install/v1alpha1/openebs_crds.go b/pkg/install/v1alpha1/openebs_crds.go
index 09f3e57e59..120e53835b 100644
--- a/pkg/install/v1alpha1/openebs_crds.go
+++ b/pkg/install/v1alpha1/openebs_crds.go
@@ -199,6 +199,10 @@ spec:
       name: Status
       description: Identifies the current health of the pool
       type: string
+    - JSONPath: .status.readOnly
+      description: Identifies whether the pool is in read-only mode
+      name: ReadOnly
+      type: boolean
     - JSONPath: .spec.poolSpec.poolType
       name: Type
       description: The type of the storage pool
diff --git a/pkg/storagepool/storagepool.go b/pkg/storagepool/storagepool.go
index 3127a325c5..5777c9033d 100644
--- a/pkg/storagepool/storagepool.go
+++ b/pkg/storagepool/storagepool.go
@@ -93,12 +93,13 @@ func (v *casPoolOperation) Create() (*v1alpha1.CasPool, error) {
 		openebsConfig,
 		string(v1alpha1.StoragePoolTLP),
 		map[string]interface{}{
-			string(v1alpha1.OwnerCTP):             v.pool.StoragePoolClaim,
-			string(v1alpha1.BlockDeviceListCTP):   v.pool.BlockDeviceList,
-			string(v1alpha1.NodeNameCTP):          v.pool.NodeName,
-			string(v1alpha1.PoolTypeCTP):          v.pool.PoolType,
-			string(v1alpha1.BlockDeviceIDListCTP): v.pool.DeviceID,
-			string(v1alpha1.PoolCacheFileCTP):     v.pool.PoolCacheFile,
+			string(v1alpha1.OwnerCTP):                v.pool.StoragePoolClaim,
+			string(v1alpha1.BlockDeviceListCTP):      v.pool.BlockDeviceList,
+			string(v1alpha1.NodeNameCTP):             v.pool.NodeName,
+			string(v1alpha1.PoolTypeCTP):             v.pool.PoolType,
+			string(v1alpha1.BlockDeviceIDListCTP):    v.pool.DeviceID,
+			string(v1alpha1.PoolCacheFileCTP):        v.pool.PoolCacheFile,
+			string(v1alpha1.PoolROThresholdLimitCTP): v.pool.PoolROThreshold,
 		},
 	)
 	if err != nil {
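Closing reviewer note: the new parsing contract is that `zpool get -Hp -ovalue health,io.openebs:readonly <pool>` prints one value per line — health first, then the readonly property — so splitting the output on "\n" must yield exactly three elements, the third being the empty string left by the trailing newline. A standalone sketch of that contract, mirroring poolStatusOutputParser above (the sample outputs are invented):

package main

import (
	"fmt"
	"strings"
)

// parse mirrors poolStatusOutputParser: index 0 is the pool health, index 1
// is the io.openebs:readonly value ("on" or "off"); anything that does not
// split into exactly three parts is rejected.
func parse(output string) (readOnly bool, health string, ok bool) {
	parts := strings.Split(output, "\n")
	if len(parts) != 3 { // two values plus the empty string after the trailing newline
		return false, "", false
	}
	return parts[1] == "on", strings.TrimSpace(parts[0]), true
}

func main() {
	for _, out := range []string{"ONLINE\noff\n", "DEGRADED\non\n", "garbage"} {
		ro, health, ok := parse(out)
		fmt.Printf("ok=%v health=%q readOnly=%v\n", ok, health, ro)
	}
}

This also explains the mocked test outputs above: "REMOVED\non\n" yields readOnly=true, while "ONLINE\n\n" (property line empty) yields readOnly=false.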