From 2880fd8b29cefd6e5b5f3f0ff0abd0d657b74ff4 Mon Sep 17 00:00:00 2001 From: Ashutosh Kumar Date: Tue, 7 Jan 2020 13:12:43 +0530 Subject: [PATCH 1/6] feat(spc): add feature to limit overprovisioning of cstor volumes Signed-off-by: Ashutosh Kumar --- .../cstorpoolselect/v1alpha1/select.go | 176 +++++++++++++++++- .../cstorpoolselect/v1alpha1/select_test.go | 13 +- pkg/apis/openebs.io/v1alpha1/cas_keys.go | 8 + pkg/cstor/pool/v1alpha2/cstorpool.go | 137 ++++++++++---- pkg/cstor/pool/v1alpha2/cstorpool_test.go | 63 ++++--- pkg/install/v1alpha1/cstor_volume.go | 10 +- .../v1alpha1/storagepoolclaim.go | 7 + 7 files changed, 332 insertions(+), 82 deletions(-) diff --git a/pkg/algorithm/cstorpoolselect/v1alpha1/select.go b/pkg/algorithm/cstorpoolselect/v1alpha1/select.go index 849900b66d..4b81241cc1 100644 --- a/pkg/algorithm/cstorpoolselect/v1alpha1/select.go +++ b/pkg/algorithm/cstorpoolselect/v1alpha1/select.go @@ -15,13 +15,18 @@ package v1alpha1 import ( - "errors" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/klog" "strings" "text/template" apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" csp "github.com/openebs/maya/pkg/cstor/pool/v1alpha2" + cstorvolume "github.com/openebs/maya/pkg/cstor/volume/v1alpha1" + cstorvolumereplica "github.com/openebs/maya/pkg/cstor/volumereplica/v1alpha1" cvr "github.com/openebs/maya/pkg/cstor/volumereplica/v1alpha1" + spc "github.com/openebs/maya/pkg/storagepoolclaim/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -92,6 +97,11 @@ const ( // the policy that does a best effort to select // the given host to place storage preferScheduleOnHostAnnotationPolicy policyName = "prefer-schedule-on-host" + + // overProvisioningPolicy is the name of + // the policy that selects the given pool to + // place storage according to overProvisioning policy + overProvisioningPolicy policyName = "overProvisioning" ) // policy exposes contracts that need @@ -103,6 +113,93 @@ type policy interface { filter(*csp.CSPList) (*csp.CSPList, error) } +// scheduleWithOverProvisioningAwareness is a pool +// selection implementation. 
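A quick orientation before the struct that implements this policy: every selection policy in select.go satisfies the small three-method policy interface shown just above (a priority, a name, and a filter that narrows the candidate pool list), and the selector runs the registered filters so that the survivors of one become the input of the next. A minimal, compilable sketch of that shape, using stand-in types rather than the real maya definitions:

    package policychain

    import "fmt"

    type policyName string
    type priority int

    type pool struct{ uid string }
    type poolList struct{ items []*pool }

    // policy mirrors the three-method contract used in select.go.
    type policy interface {
        priority() priority
        name() policyName
        filter(*poolList) (*poolList, error)
    }

    // runPolicies chains the filters: the survivors of one policy
    // feed the next, and any error aborts the selection.
    func runPolicies(pools *poolList, policies []policy) (*poolList, error) {
        var err error
        for _, p := range policies {
            if pools, err = p.filter(pools); err != nil {
                return nil, fmt.Errorf("policy %s failed: %v", p.name(), err)
            }
        }
        return pools, nil
    }

The over-provisioning policy added by this patch plugs into that chain at highPriority: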
+type scheduleWithOverProvisioningAwareness struct { + overProvisioning bool + spcName string + volumeNamespace string + capacity resource.Quantity + err []error +} + +// priority returns the priority of the +// policy implementation +func (p scheduleWithOverProvisioningAwareness) priority() priority { + return highPriority +} + +// name returns the name of the policy +// implementation +func (p scheduleWithOverProvisioningAwareness) name() policyName { + return overProvisioningPolicy +} + +// filter selects the pools available on the host +// for which the policy has been applied +func (p scheduleWithOverProvisioningAwareness) filter(pools *csp.CSPList) (*csp.CSPList, error) { + if len(p.err) > 0 { + return nil, errors.Errorf("failed to fetch overprovisioning details:%v", p.err) + } + + if p.overProvisioning { + klog.Info("Overprovisioning restriction policy not added as overprovisioning is enabled") + return pools, nil + } + + filteredPools := csp.ListBuilder().List() + for _, pool := range pools.Items { + volCap, err := getAllVolumeCapacity(pool.Object) + if err != nil { + return nil, errors.Wrapf(err, "failed to get capacity consumed by existing volumes on pool %s ", pool.Object.UID) + } + if pool.HasSpace(p.capacity, volCap) { + filteredPools.Items = append(filteredPools.Items, pool) + } else { + klog.V(2).Infof("CSP with UID %s rejected due to over provisioning policy", pool.Object.UID) + } + } + return filteredPools, nil +} + +// getAllVolumeCapacity returns the sum of total capacities of all the volumes +// present of the given CSP. +func getAllVolumeCapacity(csp *apis.CStorPool) (resource.Quantity, error) { + var totalcapcity resource.Quantity + cstorVolumeMap := make(map[string]bool) + label := string(apis.CStorPoolKey) + "=" + csp.Name + cstorVolumeReplicaObjList, err := cstorvolumereplica.NewKubeclient().WithNamespace("openebs").List(metav1.ListOptions{LabelSelector: label}) + if err != nil { + return resource.Quantity{}, errors.Wrapf(err, "error in listing all cvr resources present on %s csp", csp.Name) + } + for _, cvr := range cstorVolumeReplicaObjList.Items { + if cvr.Labels == nil { + return resource.Quantity{}, errors.Errorf("No labels found on cvr %s", cvr.Name) + } + cstorVolumeMap[cvr.Labels[string(apis.CStorVolumeKey)]] = true + } + + for cv, _ := range cstorVolumeMap { + cap, err := getCStorVolumeCapacity(cv) + if err != nil { + return resource.Quantity{}, errors.Wrapf(err, "failed to get capacity for cstorvolume %s", cv) + } + cap.Add(totalcapcity) + } + + return totalcapcity, nil + +} + +// getCStorVolumeCapacity returns the capacity present on a CStorVolume CR. +func getCStorVolumeCapacity(name string) (resource.Quantity, error) { + cv, err := cstorvolume.NewKubeclient().WithNamespace("openebs").Get(name, metav1.GetOptions{}) + if err != nil { + return resource.Quantity{}, errors.Wrapf(err, "failed to fetch cstorvolume %s", name) + } + return cv.Spec.Capacity, nil +} + // scheduleOnHost is a pool selection // implementation type scheduleOnHost struct { @@ -374,7 +471,7 @@ type buildOption func(*selection) func withDefaultSelection(s *selection) { if string(s.mode) == "" { - s.mode = singleExection + s.mode = multiExecution } } @@ -447,6 +544,69 @@ func PreferScheduleOnHostAnnotation(hostNameAnnotation string) buildOption { } } +// CapacityAwareScheduling adds scheduleWithOverProvisioningAwareness as a policy +// to be used during pool selection. 
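One review note on getAllVolumeCapacity above: resource.Quantity's Add method mutates its receiver, so `cap.Add(totalcapcity)` adds the (still zero) running total into the loop-local `cap` and never updates `totalcapcity`; as written, the function appears to always return an empty quantity. Assuming the intent is to sum the per-volume capacities, the accumulation presumably needs to point the other way, roughly:

    // hypothetical fix: accumulate into the running total, not into cap
    for cv := range cstorVolumeMap {
        cap, err := getCStorVolumeCapacity(cv)
        if err != nil {
            return resource.Quantity{}, errors.Wrapf(err, "failed to get capacity for cstorvolume %s", cv)
        }
        totalcapcity.Add(cap)
    }

(`cap` also shadows the Go builtin of the same name and could be renamed while in the area.)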
+func CapacityAwareScheduling(values ...string) buildOption { + return func(s *selection) { + var err error + overProvisioningPolicy := &scheduleWithOverProvisioningAwareness{} + + spcName := getSPCName(values...) + if strings.TrimSpace(spcName) == "" { + err = errors.New("Got empty storage pool claim from runtask") + overProvisioningPolicy.err = append(overProvisioningPolicy.err, err) + + } + overProvisioningPolicy.spcName = spcName + + volCapacity, err := getVolumeCapacity(values...) + if err != nil { + overProvisioningPolicy.err = append(overProvisioningPolicy.err, err) + } + overProvisioningPolicy.capacity = volCapacity + + if len(overProvisioningPolicy.err) == 0 { + spc, err := getSPC(spcName) + if err != nil { + overProvisioningPolicy.err = append(overProvisioningPolicy.err, err) + } else { + if spc.Spec.PoolSpec.OverProvisioning { + overProvisioningPolicy.overProvisioning = true + } + } + } + s.policies.add(overProvisioningPolicy) + } +} + +func getSPCName(values ...string) string { + + for _, val := range values { + if strings.Contains(val, string("openebs.io/storage-pool-claim")) { + str := strings.Split(val, "=") + return str[1] + } + } + return "" +} + +func getSPC(name string) (*apis.StoragePoolClaim, error) { + return spc.NewKubeClient().Get(name, metav1.GetOptions{}) +} + +func getVolumeCapacity(values ...string) (resource.Quantity, error) { + var capacity string + for _, val := range values { + if strings.Contains(val, string("volume.kubernetes.io/capacity")) { + str := strings.Split(val, "=") + capacity = str[1] + } + } + + return resource.ParseQuantity(capacity) + +} + // GetPolicies returns the appropriate selection // policies based on the provided values func GetPolicies(values ...string) []buildOption { @@ -460,6 +620,7 @@ func GetPolicies(values ...string) []buildOption { opts = append(opts, AntiAffinityLabel(val)) } } + opts = append(opts, CapacityAwareScheduling(values...)) return opts } @@ -527,10 +688,11 @@ func FilterPoolIDs(entries *csp.CSPList, opts []buildOption) ([]string, error) { // go template functions func TemplateFunctions() template.FuncMap { return template.FuncMap{ - "cspGetPolicies": GetPolicies, - "cspFilterPoolIDs": FilterPoolIDs, - "cspAntiAffinity": AntiAffinityLabel, - "cspPreferAntiAffinity": PreferAntiAffinityLabel, - "preferScheduleOnHost": PreferScheduleOnHostAnnotation, + "cspGetPolicies": GetPolicies, + "cspFilterPoolIDs": FilterPoolIDs, + "cspAntiAffinity": AntiAffinityLabel, + "cspPreferAntiAffinity": PreferAntiAffinityLabel, + "preferScheduleOnHost": PreferScheduleOnHostAnnotation, + "capacityAwareScheduling": CapacityAwareScheduling, } } diff --git a/pkg/algorithm/cstorpoolselect/v1alpha1/select_test.go b/pkg/algorithm/cstorpoolselect/v1alpha1/select_test.go index 9ac0e2a63a..58926dd407 100644 --- a/pkg/algorithm/cstorpoolselect/v1alpha1/select_test.go +++ b/pkg/algorithm/cstorpoolselect/v1alpha1/select_test.go @@ -81,6 +81,7 @@ func fakeCSPListOk(uids ...string) *csp.CSPList { func fakeCSPListScheduleOnHostOk(hostuid string, uids ...string) *csp.CSPList { fakeMap := map[string]string{} + fakeCapacityMap := map[string]string{} for _, uid := range uids { if hostuid == uid { fakeMap[hostuid] = fakeValidHost @@ -88,7 +89,7 @@ func fakeCSPListScheduleOnHostOk(hostuid string, uids ...string) *csp.CSPList { fakeMap[uid] = fakeinvalidHost } } - return csp.ListBuilder().WithUIDNode(fakeMap).List() + return csp.ListBuilder().WithUIDNode(fakeMap, fakeCapacityMap).List() } func fakeCVRListOk(uids ...string) cvrListFn { @@ -528,7 +529,7 @@ func 
TestTemplateFunctionsCount(t *testing.T) {
 	tests := map[string]struct {
 		expectedLength int
 	}{
-		"Test 1": {5},
+		"Test 1": {6},
 	}
 
 	for name, test := range tests {
@@ -636,10 +637,10 @@ func TestGetPolicies(t *testing.T) {
 		selectors            []string
 		expectedBuildoptions []buildOption
 	}{
-		"Test 1": {selectors: []string{}, expectedBuildoptions: []buildOption{}},
-		"Test 2": {selectors: []string{fakeAntiAffinitySelector}, expectedBuildoptions: []buildOption{AntiAffinityLabel(fakeAntiAffinitySelector)}},
-		"Test 3": {selectors: []string{fakePreferAntiAffinitySelector}, expectedBuildoptions: []buildOption{PreferAntiAffinityLabel(fakePreferAntiAffinitySelector)}},
-		"Test 4": {selectors: []string{fakePreferScheduleOnHostSelector}, expectedBuildoptions: []buildOption{PreferScheduleOnHostAnnotation(fakePreferScheduleOnHostSelector)}},
+		"Test 1": {selectors: []string{}, expectedBuildoptions: []buildOption{CapacityAwareScheduling()}},
+		"Test 2": {selectors: []string{fakeAntiAffinitySelector}, expectedBuildoptions: []buildOption{AntiAffinityLabel(fakeAntiAffinitySelector), CapacityAwareScheduling()}},
+		"Test 3": {selectors: []string{fakePreferAntiAffinitySelector}, expectedBuildoptions: []buildOption{PreferAntiAffinityLabel(fakePreferAntiAffinitySelector), CapacityAwareScheduling()}},
+		"Test 4": {selectors: []string{fakePreferScheduleOnHostSelector}, expectedBuildoptions: []buildOption{PreferScheduleOnHostAnnotation(fakePreferScheduleOnHostSelector), CapacityAwareScheduling()}},
 	}
 	for name, test := range tests {
 		t.Run(name, func(t *testing.T) {
diff --git a/pkg/apis/openebs.io/v1alpha1/cas_keys.go b/pkg/apis/openebs.io/v1alpha1/cas_keys.go
index 4ccef89e93..357f80c278 100644
--- a/pkg/apis/openebs.io/v1alpha1/cas_keys.go
+++ b/pkg/apis/openebs.io/v1alpha1/cas_keys.go
@@ -85,6 +85,14 @@ const (
 	// PredecessorBDKey is the key to fetch the predecessor BD in case of
 	// block device replacement.
 	PredecessorBDKey = "openebs.io/bd-predecessor"
+
+	// CStorPoolKey is the key to fetch the CVRs on a specific
+	// CStor pool. This key is present on CVR labels
+	CStorPoolKey CASKey = "cstorpool.openebs.io/name"
+
+	// CStorVolumeKey is the key to fetch the CStorVolume CR of a
+	// CVR. This key is present on CVR labels.
+	CStorVolumeKey CASKey = "cstorvolume.openebs.io/name"
 )
 
 // CASPlainKey represents a openebs key used either in resource annotation
diff --git a/pkg/cstor/pool/v1alpha2/cstorpool.go b/pkg/cstor/pool/v1alpha2/cstorpool.go
index 06a0c1ecb5..351710f21c 100644
--- a/pkg/cstor/pool/v1alpha2/cstorpool.go
+++ b/pkg/cstor/pool/v1alpha2/cstorpool.go
@@ -15,6 +15,9 @@
 package v1alpha2
 
 import (
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/klog"
 	"text/template"
 
 	apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
@@ -25,22 +28,31 @@ type annotationKey string
 
 const scheduleOnHostAnnotation annotationKey = "volume.kubernetes.io/selected-node"
 
-type csp struct {
-	// actual cstor pool object
-	object *apis.CStorPool
+const (
+	// PoolCapacityThresholdPercentage is the threshold percentage for usable pool size.
+	// Example:
+	// If pool size is 100 Gi and threshold percentage is 70 then
+	// 70 Gi is the usable pool capacity.
+	PoolCapacityThresholdPercentage = 70
+)
+
+// CSP encapsulates the CStorPool API object.
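The threshold arithmetic that the CSP type below implements reduces to a couple of resource.Quantity operations: parse Status.Capacity.Total, scale it by PoolCapacityThresholdPercentage, and compare what remains against the incoming volume. A small runnable sketch under assumed numbers; the "9.40G" value mirrors the capacity strings used by the tests later in this series, and the integer math truncates:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        // A CSP reports Status.Capacity.Total as a string such as "9.40G".
        total := resource.MustParse("9.40G")

        // Apply the 70% threshold: 9.40G leaves 6.58G of usable capacity.
        usable := total.DeepCopy()
        usable.Set(total.Value() * 70 / 100)

        fmt.Println(usable.Value()) // 6580000000 bytes
    }

With that arithmetic in mind, the CSP wrapper and its capacity helpers: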
+type CSP struct {
+	// actual cstor pool Object
+	Object *apis.CStorPool
 }
 
 // predicate defines an abstraction
 // to determine conditional checks
-// against the provided csp instance
-type predicate func(*csp) bool
+// against the provided CSP instance
+type predicate func(*CSP) bool
 
 type predicateList []predicate
 
 // all returns true if all the predicates
-// succeed against the provided csp
+// succeed against the provided CSP
 // instance
-func (l predicateList) all(c *csp) bool {
+func (l predicateList) all(c *CSP) bool {
 	for _, pred := range l {
 		if !pred(c) {
 			return false
@@ -49,13 +61,13 @@ func (l predicateList) all(c *csp) bool {
 	return true
 }
 
-// IsNotUID returns true if provided csp
+// IsNotUID returns true if provided CSP
 // instance's UID does not match with any
 // of the provided UIDs
 func IsNotUID(uids ...string) predicate {
-	return func(c *csp) bool {
+	return func(c *CSP) bool {
 		for _, uid := range uids {
-			if uid == string(c.object.GetUID()) {
+			if uid == string(c.Object.GetUID()) {
 				return false
 			}
 		}
@@ -63,12 +75,64 @@ func IsNotUID(uids ...string) predicate {
 	}
 }
 
+// HasSpace returns true if the CSP has free space to accommodate the incoming volume.
+func (c *CSP) HasSpace(incomingVolCap, capacityUsedByExistingVols resource.Quantity) bool {
+	totalUsableCapacityOnPool, err := c.UsableFreeCapacityOnCSP(capacityUsedByExistingVols)
+	if err != nil {
+		klog.Error(err)
+		return false
+	}
+	if totalUsableCapacityOnPool.Cmp(incomingVolCap) == 1 {
+		return true
+	}
+	return false
+}
+
+// UsableFreeCapacityOnCSP returns the total usable free capacity present on the CSP.
+// Example:
+// If UsableFreeCapacityOnCSP is 30 Gi then the pool (CSP) can accommodate any volume
+// of size less than 30 Gi
+func (c *CSP) UsableFreeCapacityOnCSP(capacityUsedByExistingVols resource.Quantity) (resource.Quantity, error) {
+	usableCapacityOnCSP, err := c.GetTotalUsableCapacityWithThreshold()
+	if err != nil {
+		return resource.Quantity{}, errors.Wrapf(err, "failed to get total usable capacity on CSP %s", c.Object.Name)
+	}
+	usableCapacityOnCSP.Sub(capacityUsedByExistingVols)
+	return usableCapacityOnCSP, nil
+}
+
+// GetTotalUsableCapacityWithThreshold returns the usable capacity on the pool.
+// Example:
+// If TotalUsableCapacityWithThreshold is 70 Gi then it is possible that some amount
+// of space is already being used by some other existing volumes.
+func (c *CSP) GetTotalUsableCapacityWithThreshold() (resource.Quantity, error) {
+	totalCapacity, err := resource.ParseQuantity(c.Object.Status.Capacity.Total)
+	if err != nil {
+		return resource.Quantity{}, errors.Wrapf(err, "failed to parse capacity {%s} from CSP %s", c.Object.Status.Capacity.Total, c.Object.Name)
+	}
+	capacityThreshold := c.GetCapacityThreshold()
+
+	totalCapacityValue := totalCapacity.Value()
+	totalUsableCapacityValue := totalCapacityValue * (int64(capacityThreshold)) / 100
+	totalCapacity.Set(totalUsableCapacityValue)
+	return totalCapacity, nil
+}
+
+// GetCapacityThreshold returns the capacity threshold.
+// Example:
+// If pool size is 100 Gi and capacity threshold is 70%
+// then the effective size of pool is 70 Gi for accommodating volumes.
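Putting the three methods together with concrete numbers: a CSP whose Status.Capacity.Total parses to 100Gi yields 70Gi from GetTotalUsableCapacityWithThreshold; if volumes already on the pool account for 50Gi, UsableFreeCapacityOnCSP leaves 20Gi, so HasSpace admits a 10Gi volume but rejects a 20Gi one, since the Cmp(incomingVolCap) == 1 test is strictly greater-than and exact fits are refused. A usage sketch (the pool object and the consumed figure are assumed inputs here; in the patch they come from the selector):

    pool := &CSP{Object: cspObject} // cspObject: a populated *apis.CStorPool
    consumed := resource.MustParse("50Gi")
    incoming := resource.MustParse("10Gi")
    if pool.HasSpace(incoming, consumed) {
        // this CSP remains a candidate for the replica
    }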
+func (c *CSP) GetCapacityThreshold() int { + // ToDo: Add capability to override via annotaions + return PoolCapacityThresholdPercentage +} + // HasAnnotation returns true if provided annotation // key and value are present in the provided CSP // instance func HasAnnotation(key, value string) predicate { - return func(c *csp) bool { - val, ok := c.object.GetAnnotations()[key] + return func(c *CSP) bool { + val, ok := c.Object.GetAnnotations()[key] if ok { return val == value } @@ -79,12 +143,12 @@ func HasAnnotation(key, value string) predicate { // CSPList holds the list of cstorpools type CSPList struct { // list of cstor pools - items []*csp + Items []*CSP } -// Filter will filter the csp instances +// Filter will filter the CSP instances // if all the predicates succeed against that -// csp. +// CSP. func (l *CSPList) Filter(p ...predicate) *CSPList { var plist predicateList plist = append(plist, p...) @@ -93,22 +157,22 @@ func (l *CSPList) Filter(p ...predicate) *CSPList { } filtered := ListBuilder().List() - for _, csp := range l.items { - if plist.all(csp) { - filtered.items = append(filtered.items, csp) + for _, CSP := range l.Items { + if plist.all(CSP) { + filtered.Items = append(filtered.Items, CSP) } } return filtered } // listBuilder enables building a -// list of csp instances +// list of CSP instances type listBuilder struct { list *CSPList } // ListBuilder returns a new instance of -// listBuilder object +// listBuilder Object func ListBuilder() *listBuilder { return &listBuilder{list: &CSPList{}} } @@ -117,21 +181,22 @@ func ListBuilder() *listBuilder { // based on the provided pool UIDs func (b *listBuilder) WithUIDs(poolUIDs ...string) *listBuilder { for _, uid := range poolUIDs { - obj := &csp{&apis.CStorPool{}} - obj.object.SetUID(types.UID(uid)) - b.list.items = append(b.list.items, obj) + obj := &CSP{&apis.CStorPool{}} + obj.Object.SetUID(types.UID(uid)) + b.list.Items = append(b.list.Items, obj) } return b } -// WithUIDNodeMap builds a cspList based on the provided +// WithUIDNodeMap builds a CSPList based on the provided // map of uid and nodename -func (b *listBuilder) WithUIDNode(UIDNode map[string]string) *listBuilder { +func (b *listBuilder) WithUIDNode(UIDNode, UIDCapacity map[string]string) *listBuilder { for k, v := range UIDNode { - obj := &csp{&apis.CStorPool{}} - obj.object.SetUID(types.UID(k)) - obj.object.SetAnnotations(map[string]string{string(scheduleOnHostAnnotation): v}) - b.list.items = append(b.list.items, obj) + obj := &CSP{&apis.CStorPool{}} + obj.Object.SetUID(types.UID(k)) + obj.Object.SetAnnotations(map[string]string{string(scheduleOnHostAnnotation): v}) + obj.Object.Status.Capacity.Total = UIDCapacity[k] + b.list.Items = append(b.list.Items, obj) } return b } @@ -142,7 +207,7 @@ func (b *listBuilder) WithList(pools *CSPList) *listBuilder { if pools == nil { return b } - b.list.items = append(b.list.items, pools.items...) + b.list.Items = append(b.list.Items, pools.Items...) 
return b } @@ -154,13 +219,13 @@ func (b *listBuilder) WithAPIList(pools *apis.CStorPoolList) *listBuilder { } for _, pool := range pools.Items { pool := pool //pin it - b.list.items = append(b.list.items, &csp{&pool}) + b.list.Items = append(b.list.Items, &CSP{&pool}) } return b } -// List returns the list of csp +// List returns the list of CSP // instances that were built by // this builder func (b *listBuilder) List() *CSPList { @@ -171,16 +236,16 @@ func (b *listBuilder) List() *CSPList { // available in the list func (l *CSPList) GetPoolUIDs() []string { uids := []string{} - for _, pool := range l.items { - uids = append(uids, string(pool.object.GetUID())) + for _, pool := range l.Items { + uids = append(uids, string(pool.Object.GetUID())) } return uids } // newListFromUIDNode exposes WithUIDNodeMap // to CAS Templates -func newListFromUIDNode(UIDNodeMap map[string]string) *CSPList { - return ListBuilder().WithUIDNode(UIDNodeMap).List() +func newListFromUIDNode(UIDNodeMap, UIDCapacityMap map[string]string) *CSPList { + return ListBuilder().WithUIDNode(UIDNodeMap, UIDCapacityMap).List() } // newListFromUIDs exposes WithUIDs to CASTemplates diff --git a/pkg/cstor/pool/v1alpha2/cstorpool_test.go b/pkg/cstor/pool/v1alpha2/cstorpool_test.go index 6d3cc1d358..a9aa2d2aa2 100644 --- a/pkg/cstor/pool/v1alpha2/cstorpool_test.go +++ b/pkg/cstor/pool/v1alpha2/cstorpool_test.go @@ -23,8 +23,8 @@ import ( "k8s.io/apimachinery/pkg/types" ) -func mockAlwaysTrue(*csp) bool { return true } -func mockAlwaysFalse(*csp) bool { return false } +func mockAlwaysTrue(*CSP) bool { return true } +func mockAlwaysFalse(*CSP) bool { return false } func TestCStorPoolAll(t *testing.T) { tests := map[string]struct { @@ -48,7 +48,7 @@ func TestCStorPoolAll(t *testing.T) { } for name, mock := range tests { t.Run(name, func(t *testing.T) { - if output := mock.Predicates.all(&csp{}); output != mock.expectedOutput { + if output := mock.Predicates.all(&CSP{}); output != mock.expectedOutput { t.Fatalf("test %q failed: expected %v \n got : %v \n", name, mock.expectedOutput, output) } }) @@ -57,7 +57,7 @@ func TestCStorPoolAll(t *testing.T) { func TestCStorPoolIsNotUID(t *testing.T) { tests := map[string]struct { - cspuid types.UID + CSPuid types.UID uids []string expectedOutput bool }{ @@ -77,7 +77,7 @@ func TestCStorPoolIsNotUID(t *testing.T) { } for name, mock := range tests { t.Run(name, func(t *testing.T) { - mockCSP := &csp{&apis.CStorPool{ObjectMeta: metav1.ObjectMeta{UID: mock.cspuid}}} + mockCSP := &CSP{&apis.CStorPool{ObjectMeta: metav1.ObjectMeta{UID: mock.CSPuid}}} p := IsNotUID(mock.uids...) if output := p(mockCSP); output != mock.expectedOutput { t.Fatalf("test %q failed: expected %v \n got : %v \n", name, mock.expectedOutput, output) @@ -103,8 +103,8 @@ func TestCStorPoolFilterUIDs(t *testing.T) { } for name, mock := range tests { t.Run(name, func(t *testing.T) { - cspL := ListBuilder().WithUIDs(mock.UIDs...).List() - output := cspL.Filter(mock.Predicates...) + CSPL := ListBuilder().WithUIDs(mock.UIDs...).List() + output := CSPL.Filter(mock.Predicates...) if len(mock.expectedOutput) != len(output.GetPoolUIDs()) { t.Fatalf("test %q failed: expected %v \n got : %v \n", name, mock.expectedOutput, output.GetPoolUIDs()) } @@ -136,12 +136,12 @@ func TestCStorPoolWithUIDs(t *testing.T) { for name, mock := range tests { t.Run(name, func(t *testing.T) { lb := ListBuilder().WithUIDs(mock.expectedUIDs...) 
- if len(lb.list.items) != len(mock.expectedUIDs) { - t.Fatalf("test %q failed: expected %v \n got : %v \n", name, mock.expectedUIDs, lb.list.items) + if len(lb.list.Items) != len(mock.expectedUIDs) { + t.Fatalf("test %q failed: expected %v \n got : %v \n", name, mock.expectedUIDs, lb.list.Items) } - for index, val := range lb.list.items { - if string(val.object.GetUID()) != mock.expectedUIDs[index] { - t.Fatalf("test %q failed: expected %v \n got : %v \n", name, mock.expectedUIDs[index], string(val.object.GetUID())) + for index, val := range lb.list.Items { + if string(val.Object.GetUID()) != mock.expectedUIDs[index] { + t.Fatalf("test %q failed: expected %v \n got : %v \n", name, mock.expectedUIDs[index], string(val.Object.GetUID())) } } }) @@ -167,12 +167,12 @@ func TestCstorPoolList(t *testing.T) { for name, mock := range tests { t.Run(name, func(t *testing.T) { lb := ListBuilder().WithUIDs(mock.expectedUIDs...).List() - if len(lb.items) != len(mock.expectedUIDs) { - t.Fatalf("test %q failed: expected %v \n got : %v \n", name, mock.expectedUIDs, lb.items) + if len(lb.Items) != len(mock.expectedUIDs) { + t.Fatalf("test %q failed: expected %v \n got : %v \n", name, mock.expectedUIDs, lb.Items) } - for index, val := range lb.items { - if string(val.object.GetUID()) != mock.expectedUIDs[index] { - t.Fatalf("test %q failed: expected %v \n got : %v \n", name, mock.expectedUIDs[index], string(val.object.GetUID())) + for index, val := range lb.Items { + if string(val.Object.GetUID()) != mock.expectedUIDs[index] { + t.Fatalf("test %q failed: expected %v \n got : %v \n", name, mock.expectedUIDs[index], string(val.Object.GetUID())) } } }) @@ -208,19 +208,20 @@ func TestBuildWithListUids(t *testing.T) { func TestNewListFromUIDNode(t *testing.T) { tests := map[string]struct { - UIDNodeMap map[string]string - expectedPools []string + UIDNodeMap map[string]string + UIDCapacityMap map[string]string + expectedPools []string }{ - "Test 1": {map[string]string{"Pool 1": "host 1"}, []string{"Pool 1"}}, - "Test 2": {map[string]string{"Pool 1": "host 1", "Pool 2": "host 2"}, []string{"Pool 1", "Pool 2"}}, - "Test 3": {map[string]string{"Pool 1": "host 1", "Pool 2": "host 2", "Pool 3": "host 3"}, []string{"Pool 1", "Pool 2", "Pool 3"}}, - "Test 4": {map[string]string{"Pool 1": "host 1", "Pool 2": "host 2", "Pool 3": "host 3", "Pool 4": "host 4"}, []string{"Pool 1", "Pool 2", "Pool 3", "Pool 4"}}, - "Test 5": {map[string]string{"Pool 1": "host 1", "Pool 2": "host 2", "Pool 3": "host 3", "Pool 4": "host 4", "Pool 5": "host 5"}, []string{"Pool 1", "Pool 2", "Pool 3", "Pool 4", "Pool 5"}}, + "Test 1": {map[string]string{"Pool 1": "host 1"}, map[string]string{"Pool 1": "9.40G"}, []string{"Pool 1"}}, + "Test 2": {map[string]string{"Pool 1": "host 1", "Pool 2": "host 2"}, map[string]string{"Pool 1": "9.40G"}, []string{"Pool 1", "Pool 2"}}, + "Test 3": {map[string]string{"Pool 1": "host 1", "Pool 2": "host 2", "Pool 3": "host 3"}, map[string]string{"Pool 1": "9.40G"}, []string{"Pool 1", "Pool 2", "Pool 3"}}, + "Test 4": {map[string]string{"Pool 1": "host 1", "Pool 2": "host 2", "Pool 3": "host 3", "Pool 4": "host 4"}, map[string]string{"Pool 1": "9.40G"}, []string{"Pool 1", "Pool 2", "Pool 3", "Pool 4"}}, + "Test 5": {map[string]string{"Pool 1": "host 1", "Pool 2": "host 2", "Pool 3": "host 3", "Pool 4": "host 4", "Pool 5": "host 5"}, map[string]string{"Pool 1": "9.40G"}, []string{"Pool 1", "Pool 2", "Pool 3", "Pool 4", "Pool 5"}}, } for name, mock := range tests { t.Run(name, func(t *testing.T) { - output := 
newListFromUIDNode(mock.UIDNodeMap).GetPoolUIDs() + output := newListFromUIDNode(mock.UIDNodeMap, mock.UIDCapacityMap).GetPoolUIDs() if len(output) != len(mock.expectedPools) { t.Fatalf("Test %v failed: Expected %v but got %v", name, mock.expectedPools, output) } @@ -262,7 +263,7 @@ func TestTemplateFunctionsCount(t *testing.T) { t.Run(name, func(t *testing.T) { p := TemplateFunctions() if len(p) != test.expectedLength { - t.Fatalf("test %q failed: expected items %v but got %v", name, test.expectedLength, len(p)) + t.Fatalf("test %q failed: expected Items %v but got %v", name, test.expectedLength, len(p)) } }) } @@ -283,10 +284,10 @@ func TestHasAnnotation(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { - fakeCSP := &csp{&apis.CStorPool{ObjectMeta: metav1.ObjectMeta{Annotations: test.availableAnnotations}}} + fakeCSP := &CSP{&apis.CStorPool{ObjectMeta: metav1.ObjectMeta{Annotations: test.availableAnnotations}}} ok := HasAnnotation(test.checkForKey, test.checkForValue)(fakeCSP) if ok != test.hasAnnotation { - t.Fatalf("Test %v failed, Expected %v but got %v", name, test.availableAnnotations, fakeCSP.object.GetAnnotations()) + t.Fatalf("Test %v failed, Expected %v but got %v", name, test.availableAnnotations, fakeCSP.Object.GetAnnotations()) } }) } @@ -312,9 +313,9 @@ func TestWithAPIList(t *testing.T) { } b := ListBuilder().WithAPIList(&apis.CStorPoolList{Items: poolItems}) - for index, ob := range b.list.items { - if !reflect.DeepEqual(*ob.object, poolItems[index]) { - t.Fatalf("test %q failed: expected %v \n got : %v \n", name, poolItems[index], ob.object) + for index, ob := range b.list.Items { + if !reflect.DeepEqual(*ob.Object, poolItems[index]) { + t.Fatalf("test %q failed: expected %v \n got : %v \n", name, poolItems[index], ob.Object) } } }) diff --git a/pkg/install/v1alpha1/cstor_volume.go b/pkg/install/v1alpha1/cstor_volume.go index 83c89c9e56..ad52a2e607 100644 --- a/pkg/install/v1alpha1/cstor_volume.go +++ b/pkg/install/v1alpha1/cstor_volume.go @@ -259,6 +259,8 @@ spec: {{- $poolsList | keyMap "cvolPoolList" .ListItems | noop -}} {{- $poolsNodeList := jsonpath .JsonResult "{range .items[*]}pkey=pools,{@.metadata.uid}={@.metadata.labels.kubernetes\\.io/hostname};{end}" | trim | default "" | splitList ";" -}} {{- $poolsNodeList | keyMap "cvolPoolNodeList" .ListItems | noop -}} + {{- $poolsCapList := jsonpath .JsonResult "{range .items[*]}pkey=poolsCapacity,{@.metadata.uid}={@.status.capacity.total};{end}" | trim | default "" | splitList ";" -}} + {{- $poolsCapList | keyMap "cvolPoolCapList" .ListItems | noop -}} {{- end }} --- #runTask to get storageclass info @@ -679,13 +681,17 @@ spec: Add as many poolUid to resources as there is replica count */}} {{- $hostName := .TaskResult.creategetpvc.hostName -}} + {{- $capacity := .Volume.capacity -}} + {{- $spc := .Config.StoragePoolClaim.value }} {{- $replicaAntiAffinity := .TaskResult.creategetpvc.replicaAntiAffinity }} {{- $preferredReplicaAntiAffinity := .TaskResult.creategetpvc.preferredReplicaAntiAffinity }} {{- $antiAffinityLabelSelector := printf "openebs.io/replica-anti-affinity=%s" $replicaAntiAffinity | IfNotNil $replicaAntiAffinity }} {{- $preferredAntiAffinityLabelSelector := printf "openebs.io/preferred-replica-anti-affinity=%s" $preferredReplicaAntiAffinity | IfNotNil $preferredReplicaAntiAffinity }} {{- $preferedScheduleOnHostAnnotationSelector := printf "volume.kubernetes.io/selected-node=%s" $hostName | IfNotNil $hostName }} - {{- $selectionPolicies := cspGetPolicies 
$antiAffinityLabelSelector $preferredAntiAffinityLabelSelector $preferedScheduleOnHostAnnotationSelector }} - {{- $pools := createCSPListFromUIDNodeMap (getMapofString .ListItems.cvolPoolNodeList "pools") }} + {{- $volumeCapacity := printf "volume.kubernetes.io/capacity=%s" $capacity | IfNotNil $capacity }} + {{- $spcName := printf "openebs.io/storage-pool-claim=%s" $spc | IfNotNil $spc }} + {{- $selectionPolicies := cspGetPolicies $antiAffinityLabelSelector $preferredAntiAffinityLabelSelector $preferedScheduleOnHostAnnotationSelector $volumeCapacity $spcName }} + {{- $pools := createCSPListFromUIDNodeMap (getMapofString .ListItems.cvolPoolNodeList "pools") (getMapofString .ListItems.cvolPoolCapList "poolsCapacity") }} {{- $poolUids := cspFilterPoolIDs $pools $selectionPolicies | randomize }} {{- $replicaCount := .Config.ReplicaCount.value | int64 -}} {{- if lt (len $poolUids) $replicaCount -}} diff --git a/pkg/storagepoolclaim/v1alpha1/storagepoolclaim.go b/pkg/storagepoolclaim/v1alpha1/storagepoolclaim.go index 873a866914..ed95b23531 100644 --- a/pkg/storagepoolclaim/v1alpha1/storagepoolclaim.go +++ b/pkg/storagepoolclaim/v1alpha1/storagepoolclaim.go @@ -90,6 +90,13 @@ func (l predicateList) all(c *SPC) bool { return true } +// IsOverProvisioningEnabled returns OverProvisioning truth value. +func IsOverProvisioningEnabled() Predicate { + return func(spc *SPC) bool { + return spc.Object.Spec.PoolSpec.OverProvisioning + } +} + // HasAnnotation returns true if provided annotation key and value are present in the provided spc instance. func HasAnnotation(key, value string) Predicate { return func(c *SPC) bool { From 26fbeed011c5acda0fc9bc98f1f77058c0336352 Mon Sep 17 00:00:00 2001 From: Ashutosh Kumar Date: Fri, 17 Jan 2020 13:01:26 +0530 Subject: [PATCH 2/6] incorporate review comments Signed-off-by: Ashutosh Kumar --- .../cstorpoolselect/v1alpha1/select.go | 2 +- pkg/cstor/pool/v1alpha2/cstorpool_test.go | 30 +++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/pkg/algorithm/cstorpoolselect/v1alpha1/select.go b/pkg/algorithm/cstorpoolselect/v1alpha1/select.go index 4b81241cc1..5430267c36 100644 --- a/pkg/algorithm/cstorpoolselect/v1alpha1/select.go +++ b/pkg/algorithm/cstorpoolselect/v1alpha1/select.go @@ -147,7 +147,7 @@ func (p scheduleWithOverProvisioningAwareness) filter(pools *csp.CSPList) (*csp. 
return pools, nil } - filteredPools := csp.ListBuilder().List() + filteredPools := &csp.CSPList{Items: []*csp.CSP{}} for _, pool := range pools.Items { volCap, err := getAllVolumeCapacity(pool.Object) if err != nil { diff --git a/pkg/cstor/pool/v1alpha2/cstorpool_test.go b/pkg/cstor/pool/v1alpha2/cstorpool_test.go index a9aa2d2aa2..5d53131e23 100644 --- a/pkg/cstor/pool/v1alpha2/cstorpool_test.go +++ b/pkg/cstor/pool/v1alpha2/cstorpool_test.go @@ -47,6 +47,9 @@ func TestCStorPoolAll(t *testing.T) { "Negative Predicate 9": {[]predicate{mockAlwaysFalse, mockAlwaysFalse, mockAlwaysFalse}, false}, } for name, mock := range tests { + // pin it + name := name + mock := mock t.Run(name, func(t *testing.T) { if output := mock.Predicates.all(&CSP{}); output != mock.expectedOutput { t.Fatalf("test %q failed: expected %v \n got : %v \n", name, mock.expectedOutput, output) @@ -76,6 +79,9 @@ func TestCStorPoolIsNotUID(t *testing.T) { "Negative 5": {"uid5", []string{"uid1", "uid2", "uid3", "uid4", "uid5"}, false}, } for name, mock := range tests { + // pin it + name := name + mock := mock t.Run(name, func(t *testing.T) { mockCSP := &CSP{&apis.CStorPool{ObjectMeta: metav1.ObjectMeta{UID: mock.CSPuid}}} p := IsNotUID(mock.uids...) @@ -102,6 +108,9 @@ func TestCStorPoolFilterUIDs(t *testing.T) { "Negative 3": {[]predicate{mockAlwaysFalse, mockAlwaysFalse}, []string{"uid1", "uid2", "uid3"}, []string{}}, } for name, mock := range tests { + // pin it + name := name + mock := mock t.Run(name, func(t *testing.T) { CSPL := ListBuilder().WithUIDs(mock.UIDs...).List() output := CSPL.Filter(mock.Predicates...) @@ -134,6 +143,9 @@ func TestCStorPoolWithUIDs(t *testing.T) { } for name, mock := range tests { + // pin it + name := name + mock := mock t.Run(name, func(t *testing.T) { lb := ListBuilder().WithUIDs(mock.expectedUIDs...) 
if len(lb.list.Items) != len(mock.expectedUIDs) { @@ -165,6 +177,9 @@ func TestCstorPoolList(t *testing.T) { } for name, mock := range tests { + // pin it + name := name + mock := mock t.Run(name, func(t *testing.T) { lb := ListBuilder().WithUIDs(mock.expectedUIDs...).List() if len(lb.Items) != len(mock.expectedUIDs) { @@ -196,6 +211,9 @@ func TestBuildWithListUids(t *testing.T) { } for name, mock := range tests { + // pin it + name := name + mock := mock t.Run(name, func(t *testing.T) { lb := ListBuilder().WithUIDs(mock.expectedUIDs...).List() if len(lb.GetPoolUIDs()) != len(mock.expectedUIDs) { @@ -220,6 +238,9 @@ func TestNewListFromUIDNode(t *testing.T) { } for name, mock := range tests { + // pin it + name := name + mock := mock t.Run(name, func(t *testing.T) { output := newListFromUIDNode(mock.UIDNodeMap, mock.UIDCapacityMap).GetPoolUIDs() if len(output) != len(mock.expectedPools) { @@ -242,6 +263,9 @@ func TestNewListFromUIDs(t *testing.T) { } for name, mock := range tests { + // pin it + name := name + mock := mock t.Run(name, func(t *testing.T) { output := newListFromUIDs(mock.PoolUIDs).GetPoolUIDs() if len(output) != len(mock.PoolUIDs) { @@ -260,6 +284,9 @@ func TestTemplateFunctionsCount(t *testing.T) { } for name, test := range tests { + // pin it + name := name + test := test t.Run(name, func(t *testing.T) { p := TemplateFunctions() if len(p) != test.expectedLength { @@ -283,6 +310,9 @@ func TestHasAnnotation(t *testing.T) { } for name, test := range tests { + // pin it + name := name + test := test t.Run(name, func(t *testing.T) { fakeCSP := &CSP{&apis.CStorPool{ObjectMeta: metav1.ObjectMeta{Annotations: test.availableAnnotations}}} ok := HasAnnotation(test.checkForKey, test.checkForValue)(fakeCSP) From 08a1e9e4afa8cee07913e413370c4228a95227bb Mon Sep 17 00:00:00 2001 From: Ashutosh Kumar Date: Fri, 17 Jan 2020 13:10:20 +0530 Subject: [PATCH 3/6] incorporate review comments Signed-off-by: Ashutosh Kumar --- .../cstorpoolselect/v1alpha1/select.go | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/pkg/algorithm/cstorpoolselect/v1alpha1/select.go b/pkg/algorithm/cstorpoolselect/v1alpha1/select.go index 5430267c36..0adb9fef3c 100644 --- a/pkg/algorithm/cstorpoolselect/v1alpha1/select.go +++ b/pkg/algorithm/cstorpoolselect/v1alpha1/select.go @@ -26,6 +26,7 @@ import ( cstorvolume "github.com/openebs/maya/pkg/cstor/volume/v1alpha1" cstorvolumereplica "github.com/openebs/maya/pkg/cstor/volumereplica/v1alpha1" cvr "github.com/openebs/maya/pkg/cstor/volumereplica/v1alpha1" + env "github.com/openebs/maya/pkg/env/v1alpha1" spc "github.com/openebs/maya/pkg/storagepoolclaim/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -118,7 +119,7 @@ type policy interface { type scheduleWithOverProvisioningAwareness struct { overProvisioning bool spcName string - volumeNamespace string + openebsNamespace string capacity resource.Quantity err []error } @@ -149,7 +150,7 @@ func (p scheduleWithOverProvisioningAwareness) filter(pools *csp.CSPList) (*csp. filteredPools := &csp.CSPList{Items: []*csp.CSP{}} for _, pool := range pools.Items { - volCap, err := getAllVolumeCapacity(pool.Object) + volCap, err := p.getAllVolumeCapacity(pool.Object) if err != nil { return nil, errors.Wrapf(err, "failed to get capacity consumed by existing volumes on pool %s ", pool.Object.UID) } @@ -164,11 +165,11 @@ func (p scheduleWithOverProvisioningAwareness) filter(pools *csp.CSPList) (*csp. 
// getAllVolumeCapacity returns the sum of total capacities of all the volumes // present of the given CSP. -func getAllVolumeCapacity(csp *apis.CStorPool) (resource.Quantity, error) { +func (p *scheduleWithOverProvisioningAwareness) getAllVolumeCapacity(csp *apis.CStorPool) (resource.Quantity, error) { var totalcapcity resource.Quantity cstorVolumeMap := make(map[string]bool) label := string(apis.CStorPoolKey) + "=" + csp.Name - cstorVolumeReplicaObjList, err := cstorvolumereplica.NewKubeclient().WithNamespace("openebs").List(metav1.ListOptions{LabelSelector: label}) + cstorVolumeReplicaObjList, err := cstorvolumereplica.NewKubeclient().WithNamespace(p.openebsNamespace).List(metav1.ListOptions{LabelSelector: label}) if err != nil { return resource.Quantity{}, errors.Wrapf(err, "error in listing all cvr resources present on %s csp", csp.Name) } @@ -180,7 +181,7 @@ func getAllVolumeCapacity(csp *apis.CStorPool) (resource.Quantity, error) { } for cv, _ := range cstorVolumeMap { - cap, err := getCStorVolumeCapacity(cv) + cap, err := p.getCStorVolumeCapacity(cv) if err != nil { return resource.Quantity{}, errors.Wrapf(err, "failed to get capacity for cstorvolume %s", cv) } @@ -192,8 +193,8 @@ func getAllVolumeCapacity(csp *apis.CStorPool) (resource.Quantity, error) { } // getCStorVolumeCapacity returns the capacity present on a CStorVolume CR. -func getCStorVolumeCapacity(name string) (resource.Quantity, error) { - cv, err := cstorvolume.NewKubeclient().WithNamespace("openebs").Get(name, metav1.GetOptions{}) +func (p *scheduleWithOverProvisioningAwareness) getCStorVolumeCapacity(name string) (resource.Quantity, error) { + cv, err := cstorvolume.NewKubeclient().WithNamespace(p.openebsNamespace).Get(name, metav1.GetOptions{}) if err != nil { return resource.Quantity{}, errors.Wrapf(err, "failed to fetch cstorvolume %s", name) } @@ -565,6 +566,11 @@ func CapacityAwareScheduling(values ...string) buildOption { } overProvisioningPolicy.capacity = volCapacity + // Get the namespace where OpenEBS is installed + + openEBSnamespace := env.Get(env.OpenEBSNamespace) + overProvisioningPolicy.openebsNamespace = openEBSnamespace + if len(overProvisioningPolicy.err) == 0 { spc, err := getSPC(spcName) if err != nil { From c7edd0216d8f738dd7d3aabbf6ba503e8ba579e8 Mon Sep 17 00:00:00 2001 From: Ashutosh Kumar Date: Fri, 17 Jan 2020 14:43:01 +0530 Subject: [PATCH 4/6] incorporate review comments Signed-off-by: Ashutosh Kumar --- .../cstorpoolselect/v1alpha1/select.go | 37 ++++++++++++------- 1 file changed, 23 insertions(+), 14 deletions(-) diff --git a/pkg/algorithm/cstorpoolselect/v1alpha1/select.go b/pkg/algorithm/cstorpoolselect/v1alpha1/select.go index 0adb9fef3c..036fae93df 100644 --- a/pkg/algorithm/cstorpoolselect/v1alpha1/select.go +++ b/pkg/algorithm/cstorpoolselect/v1alpha1/select.go @@ -46,6 +46,7 @@ const ( // replicaAntiAffinty is the label key // that refers to replica anti affinity policy replicaAntiAffinityLabel labelKey = "openebs.io/replica-anti-affinity" + volumeCapacityLabel labelKey = "volume.kubernetes.io/capacity" ) type annotationKey string @@ -117,11 +118,17 @@ type policy interface { // scheduleWithOverProvisioningAwareness is a pool // selection implementation. type scheduleWithOverProvisioningAwareness struct { + // overProvisioning field if true means over-provisioning is enabled or vice-versa. overProvisioning bool - spcName string + // spcName is the name of the SPC to which the over-provisioning policy will be + // applied and volume will be created from CSPs of this SPC. 
+	spcName string
+	// openebsNamespace is the namespace where OpenEBS is installed.
 	openebsNamespace string
-	capacity resource.Quantity
-	err []error
+	// totalCapacity is the capacity of the incoming volume.
+	totalCapacity resource.Quantity
+	// err contains a list of errors, if any, hit while building this structure.
+	err []error
 }
 
 // priority returns the priority of the
@@ -144,20 +151,21 @@ func (p scheduleWithOverProvisioningAwareness) filter(pools *csp.CSPList) (*csp.
 	}
 
 	if p.overProvisioning {
-		klog.Info("Overprovisioning restriction policy not added as overprovisioning is enabled")
+		klog.Infof("Overprovisioning restriction policy not added as overprovisioning is enabled on spc %s", p.spcName)
 		return pools, nil
 	}
 
 	filteredPools := &csp.CSPList{Items: []*csp.CSP{}}
 	for _, pool := range pools.Items {
-		volCap, err := p.getAllVolumeCapacity(pool.Object)
+		volCap, err := p.consumedCapacity(pool.Object)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to get capacity consumed by existing volumes on pool %s ", pool.Object.UID)
+			klog.Errorf("failed to get capacity consumed by existing volumes on pool %s:{%s} ", pool.Object.UID, err.Error())
+			continue
 		}
-		if pool.HasSpace(p.capacity, volCap) {
+		if pool.HasSpace(p.totalCapacity, volCap) {
 			filteredPools.Items = append(filteredPools.Items, pool)
 		} else {
-			klog.V(2).Infof("CSP with UID %s rejected due to over provisioning policy", pool.Object.UID)
+			klog.V(2).Infof("Can't select CSP with UID %q: Required space not available: Policy %s", pool.Object.UID, overProvisioningPolicy)
 		}
 	}
 	return filteredPools, nil
@@ -165,17 +173,18 @@ func (p scheduleWithOverProvisioningAwareness) filter(pools *csp.CSPList) (*csp.
 
 // getAllVolumeCapacity returns the sum of total capacities of all the volumes
 // present of the given CSP.
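The helper renamed below computes that sum indirectly: CVRs sitting on the pool are listed through the cstorpool.openebs.io/name label, each CVR's cstorvolume.openebs.io/name label identifies its parent CStorVolume (the map de-duplicates parents), and the parents' Spec.Capacity values are added up. Stripped of error handling, the shape is roughly as follows, where cvrClient and cvClient are hypothetical stand-ins for the kubeclients used in the patch:

    cvrs, _ := cvrClient.List(metav1.ListOptions{
        LabelSelector: "cstorpool.openebs.io/name=" + cspName,
    })
    parents := map[string]bool{}
    for _, r := range cvrs.Items {
        parents[r.Labels["cstorvolume.openebs.io/name"]] = true
    }
    var total resource.Quantity
    for name := range parents {
        cv, _ := cvClient.Get(name, metav1.GetOptions{})
        total.Add(cv.Spec.Capacity)
    }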
-func (p *scheduleWithOverProvisioningAwareness) getAllVolumeCapacity(csp *apis.CStorPool) (resource.Quantity, error) {
+func (p *scheduleWithOverProvisioningAwareness) consumedCapacity(csp *apis.CStorPool) (resource.Quantity, error) {
 	var totalcapcity resource.Quantity
 	cstorVolumeMap := make(map[string]bool)
 	label := string(apis.CStorPoolKey) + "=" + csp.Name
 	cstorVolumeReplicaObjList, err := cstorvolumereplica.NewKubeclient().WithNamespace(p.openebsNamespace).List(metav1.ListOptions{LabelSelector: label})
 	if err != nil {
-		return resource.Quantity{}, errors.Wrapf(err, "error in listing all cvr resources present on %s csp", csp.Name)
+		return resource.Quantity{}, errors.Wrapf(err, "Failed to get total volume capacity for CSP %s", csp.Name)
 	}
 	for _, cvr := range cstorVolumeReplicaObjList.Items {
 		if cvr.Labels == nil {
-			return resource.Quantity{}, errors.Errorf("No labels found on cvr %s", cvr.Name)
+			return resource.Quantity{}, errors.Errorf("Failed to get total volume capacity for CSP %s: "+
+				"Missing labels in CVR %s: Want label %s to calculate total volume capacity", csp.UID, cvr.Name, apis.CStorVolumeKey)
 		}
 		cstorVolumeMap[cvr.Labels[string(apis.CStorVolumeKey)]] = true
 	}
@@ -564,7 +573,7 @@ func CapacityAwareScheduling(values ...string) buildOption {
 		if err != nil {
 			overProvisioningPolicy.err = append(overProvisioningPolicy.err, err)
 		}
-		overProvisioningPolicy.capacity = volCapacity
+		overProvisioningPolicy.totalCapacity = volCapacity
 
 		// Get the namespace where OpenEBS is installed
@@ -588,7 +597,7 @@ func CapacityAwareScheduling(values ...string) buildOption {
 func getSPCName(values ...string) string {
 
 	for _, val := range values {
-		if strings.Contains(val, string("openebs.io/storage-pool-claim")) {
+		if strings.Contains(val, string(apis.StoragePoolClaimCPK)) {
 			str := strings.Split(val, "=")
 			return str[1]
 		}
@@ -603,7 +612,7 @@ func getSPC(name string) (*apis.StoragePoolClaim, error) {
 func getVolumeCapacity(values ...string) (resource.Quantity, error) {
 	var capacity string
 	for _, val := range values {
-		if strings.Contains(val, string("volume.kubernetes.io/capacity")) {
+		if strings.Contains(val, string(volumeCapacityLabel)) {
 			str := strings.Split(val, "=")
 			capacity = str[1]
 		}

From 8ba92b6f803493c6094a2a577558a98648f16054 Mon Sep 17 00:00:00 2001
From: Ashutosh Kumar
Date: Fri, 17 Jan 2020 14:48:32 +0530
Subject: [PATCH 5/6] incorporate review comments

Signed-off-by: Ashutosh Kumar
---
 pkg/algorithm/cstorpoolselect/v1alpha1/select.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/algorithm/cstorpoolselect/v1alpha1/select.go b/pkg/algorithm/cstorpoolselect/v1alpha1/select.go
index 036fae93df..a878f05bb9 100644
--- a/pkg/algorithm/cstorpoolselect/v1alpha1/select.go
+++ b/pkg/algorithm/cstorpoolselect/v1alpha1/select.go
@@ -189,7 +189,7 @@ func (p *scheduleWithOverProvisioningAwareness) consumedCapacity(csp *apis.CStor
 		cstorVolumeMap[cvr.Labels[string(apis.CStorVolumeKey)]] = true
 	}
 
-	for cv, _ := range cstorVolumeMap {
+	for cv := range cstorVolumeMap {
 		cap, err := p.getCStorVolumeCapacity(cv)
 		if err != nil {
 			return resource.Quantity{}, errors.Wrapf(err, "failed to get capacity for cstorvolume %s", cv)

From 353d4b6cb0d587ee001f42075c82931a7398a8b1 Mon Sep 17 00:00:00 2001
From: Ashutosh Kumar
Date: Wed, 22 Jan 2020 11:53:56 +0530
Subject: [PATCH 6/6] incorporate review comments

Signed-off-by: Ashutosh Kumar
---
 .../cstorpoolselect/v1alpha1/select.go        | 18 +++++++++---------
 .../cstorpoolselect/v1alpha1/select_test.go   |  8 ++++----
 2 files changed, 13 insertions(+), 
13 deletions(-) diff --git a/pkg/algorithm/cstorpoolselect/v1alpha1/select.go b/pkg/algorithm/cstorpoolselect/v1alpha1/select.go index a878f05bb9..5a80d219f4 100644 --- a/pkg/algorithm/cstorpoolselect/v1alpha1/select.go +++ b/pkg/algorithm/cstorpoolselect/v1alpha1/select.go @@ -554,9 +554,9 @@ func PreferScheduleOnHostAnnotation(hostNameAnnotation string) buildOption { } } -// CapacityAwareScheduling adds scheduleWithOverProvisioningAwareness as a policy +// CapacityAwareProvisioning adds scheduleWithOverProvisioningAwareness as a policy // to be used during pool selection. -func CapacityAwareScheduling(values ...string) buildOption { +func CapacityAwareProvisioning(values ...string) buildOption { return func(s *selection) { var err error overProvisioningPolicy := &scheduleWithOverProvisioningAwareness{} @@ -635,7 +635,7 @@ func GetPolicies(values ...string) []buildOption { opts = append(opts, AntiAffinityLabel(val)) } } - opts = append(opts, CapacityAwareScheduling(values...)) + opts = append(opts, CapacityAwareProvisioning(values...)) return opts } @@ -703,11 +703,11 @@ func FilterPoolIDs(entries *csp.CSPList, opts []buildOption) ([]string, error) { // go template functions func TemplateFunctions() template.FuncMap { return template.FuncMap{ - "cspGetPolicies": GetPolicies, - "cspFilterPoolIDs": FilterPoolIDs, - "cspAntiAffinity": AntiAffinityLabel, - "cspPreferAntiAffinity": PreferAntiAffinityLabel, - "preferScheduleOnHost": PreferScheduleOnHostAnnotation, - "capacityAwareScheduling": CapacityAwareScheduling, + "cspGetPolicies": GetPolicies, + "cspFilterPoolIDs": FilterPoolIDs, + "cspAntiAffinity": AntiAffinityLabel, + "cspPreferAntiAffinity": PreferAntiAffinityLabel, + "preferScheduleOnHost": PreferScheduleOnHostAnnotation, + "capacityAwareProvisioning": CapacityAwareProvisioning, } } diff --git a/pkg/algorithm/cstorpoolselect/v1alpha1/select_test.go b/pkg/algorithm/cstorpoolselect/v1alpha1/select_test.go index 58926dd407..f14bb9d569 100644 --- a/pkg/algorithm/cstorpoolselect/v1alpha1/select_test.go +++ b/pkg/algorithm/cstorpoolselect/v1alpha1/select_test.go @@ -637,10 +637,10 @@ func TestGetPolicies(t *testing.T) { selectors []string expectedBuildoptions []buildOption }{ - "Test 1": {selectors: []string{}, expectedBuildoptions: []buildOption{CapacityAwareScheduling()}}, - "Test 2": {selectors: []string{fakeAntiAffinitySelector}, expectedBuildoptions: []buildOption{AntiAffinityLabel(fakeAntiAffinitySelector), CapacityAwareScheduling()}}, - "Test 3": {selectors: []string{fakePreferAntiAffinitySelector}, expectedBuildoptions: []buildOption{PreferAntiAffinityLabel(fakePreferAntiAffinitySelector), CapacityAwareScheduling()}}, - "Test 4": {selectors: []string{fakePreferScheduleOnHostSelector}, expectedBuildoptions: []buildOption{PreferScheduleOnHostAnnotation(fakePreferScheduleOnHostSelector), CapacityAwareScheduling()}}, + "Test 1": {selectors: []string{}, expectedBuildoptions: []buildOption{CapacityAwareProvisioning()}}, + "Test 2": {selectors: []string{fakeAntiAffinitySelector}, expectedBuildoptions: []buildOption{AntiAffinityLabel(fakeAntiAffinitySelector), CapacityAwareProvisioning()}}, + "Test 3": {selectors: []string{fakePreferAntiAffinitySelector}, expectedBuildoptions: []buildOption{PreferAntiAffinityLabel(fakePreferAntiAffinitySelector), CapacityAwareProvisioning()}}, + "Test 4": {selectors: []string{fakePreferScheduleOnHostSelector}, expectedBuildoptions: []buildOption{PreferScheduleOnHostAnnotation(fakePreferScheduleOnHostSelector), CapacityAwareProvisioning()}}, } for name, 
test := range tests { t.Run(name, func(t *testing.T) {