feat(migrate): add support for cstor volume migration #9

Merged
merged 5 commits, Jun 12, 2020
Changes from 4 commits
2 changes: 1 addition & 1 deletion .travis.yml
@@ -37,7 +37,7 @@ before_script: # TODO add golangci yaml config
 script:
   - make test
   - if [ "$TRAVIS_CPU_ARCH" == "amd64" ]; then
-      make upgrade-image.amd64;
+      make all.amd64;
     fi
 
 after_success:
3 changes: 3 additions & 0 deletions Makefile
@@ -129,6 +129,9 @@ cleanup-upgrade:

 include ./build/migrate/Makefile.mk
 
+.PHONY: all.amd64
+all.amd64: upgrade-image.amd64 migrate-image.amd64
+
 # Push images
 .PHONY: deploy-images
 deploy-images:
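
The new aggregate target gives CI a single entry point for both images. A quick sketch of the intended local use, assuming Docker and the repo's build scripts are set up:

    # build both the upgrade and migrate images for amd64
    make all.amd64
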
3 changes: 3 additions & 0 deletions build/deploy.sh
@@ -20,9 +20,12 @@ ARCH=$(uname -m)

 if [ "${ARCH}" = "x86_64" ]; then
   UPGRADE_IMG="${IMAGE_ORG}/upgrade-amd64"
+  MIGRATE_IMG="${IMAGE_ORG}/migrate-amd64"
 elif [ "${ARCH}" = "aarch64" ]; then
   UPGRADE_IMG="${IMAGE_ORG}/upgrade-arm64"
+  MIGRATE_IMG="${IMAGE_ORG}/migrate-arm64"
 fi
 
 # tag and push all the images
 DIMAGE="${UPGRADE_IMG}" ./build/push
+DIMAGE="${MIGRATE_IMG}" ./build/push
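
On an amd64 builder the script now pushes both images; with IMAGE_ORG=openebs (an assumed value, the variable is set elsewhere) the two push calls resolve roughly to:

    DIMAGE="openebs/upgrade-amd64" ./build/push
    DIMAGE="openebs/migrate-amd64" ./build/push
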
83 changes: 83 additions & 0 deletions cmd/migrate/executor/cstor_volume.go
@@ -0,0 +1,83 @@
/*
Copyright 2020 The OpenEBS Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package executor

import (
	"strings"

	"github.com/openebs/maya/pkg/util"
	cstor "github.com/openebs/upgrade/pkg/migrate/cstor"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"k8s.io/klog"
)

var (
	cstorVolumeMigrateCmdHelpText = `
This command migrates the cStor volume to CSI format.

Usage: migrate cstor-volume --pv-name <pv-name>
`
)

// NewMigrateCStorVolumeJob migrates the given cStor volume to CSI format.
func NewMigrateCStorVolumeJob() *cobra.Command {
	cmd := &cobra.Command{
		Use:     "cstor-volume",
		Short:   "Migrate cStor Volume",
		Long:    cstorVolumeMigrateCmdHelpText,
		Example: `migrate cstor-volume --pv-name <pv-name>`,
		Run: func(cmd *cobra.Command, args []string) {
			util.CheckErr(options.RunPreFlightChecks(), util.Fatal)
			util.CheckErr(options.RunCStorVolumeMigrateChecks(), util.Fatal)
			util.CheckErr(options.RunCStorVolumeMigrate(), util.Fatal)
		},
	}

	cmd.Flags().StringVarP(&options.pvName,
		"pv-name", "",
		options.pvName,
		"cstor volume name to be migrated. Run \"kubectl get pv\" to get the pv-name")

	return cmd
}

// RunCStorVolumeMigrateChecks will ensure the sanity of the cStor volume migrate options.
func (m *MigrateOptions) RunCStorVolumeMigrateChecks() error {
	if len(strings.TrimSpace(m.pvName)) == 0 {
		return errors.Errorf("Cannot execute migrate job: cstor pv name is missing")
	}

	return nil
}

// RunCStorVolumeMigrate migrates the given pv.
func (m *MigrateOptions) RunCStorVolumeMigrate() error {
	klog.Infof("Migrating volume %s to csi spec", m.pvName)
	migrator := cstor.VolumeMigrator{}
	err := migrator.Migrate(m.pvName, m.openebsNamespace)
	if err != nil {
		klog.Error(err)
		return errors.Errorf("Failed to migrate cStor Volume: %s", m.pvName)
	}
	klog.Infof("Successfully migrated volume %s", m.pvName)

	return nil
}
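
A sketch of how the new subcommand might be invoked once the migrate image is built. The PV name is a placeholder, and --openebs-namespace assumes the name of the persistent flag registered in setup_job.go (not shown in this diff):

    # find the cStor volume to migrate, as the flag help suggests
    kubectl get pv
    # migrate it to CSI format
    migrate cstor-volume --pv-name pvc-1234-abcd-5678 --openebs-namespace openebs
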
1 change: 1 addition & 0 deletions cmd/migrate/executor/options.go
@@ -27,6 +27,7 @@ import (
 type MigrateOptions struct {
 	openebsNamespace string
 	spcName          string
+	pvName           string
 }
 
 var (
1 change: 1 addition & 0 deletions cmd/migrate/executor/setup_job.go
@@ -37,6 +37,7 @@ func NewJob() *cobra.Command {

 	cmd.AddCommand(
 		NewMigratePoolJob(),
+		NewMigrateCStorVolumeJob(),
 	)
 
 	cmd.PersistentFlags().StringVarP(&options.openebsNamespace,
88 changes: 49 additions & 39 deletions pkg/migrate/cstor/pool.go
@@ -33,7 +33,6 @@ import (
 	csp "github.com/openebs/maya/pkg/cstor/pool/v1alpha3"
 	cvr "github.com/openebs/maya/pkg/cstor/volumereplica/v1alpha1"
 	spc "github.com/openebs/maya/pkg/storagepoolclaim/v1alpha1"
-	"github.com/openebs/maya/pkg/util/retry"
 	"github.com/pkg/errors"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -54,6 +53,7 @@ const (
 	cspiHostnameAnnotation = "cstorpoolinstance.openebs.io/hostname"
 	spcFinalizer           = "storagepoolclaim.openebs.io/finalizer"
 	cspcFinalizer          = "cstorpoolcluster.openebs.io/finalizer"
+	cspcKind               = "CStorPoolCluster"
 )
 
 // CSPCMigrator ...
@@ -165,6 +165,10 @@ func (c *CSPCMigrator) migrate(spcName string) error {
 			return err
 		}
 	}
+	err = addSkipAnnotationToSPC(c.SPCObj)
+	if err != nil {
+		return errors.Wrap(err, "failed to add skip-validation annotation")
+	}
 	// Clean up old SPC resources after the migration is complete
 	err = spc.NewKubeClient().
 		Delete(spcName, &metav1.DeleteOptions{})
@@ -268,25 +272,20 @@ func (c *CSPCMigrator) cspTocspi(cspiObj *cstor.CStorPoolInstance) error {
 			return err
 		}
 	}
-	err = retry.
-		Times(60).
-		Wait(5 * time.Second).
-		Try(func(attempt uint) error {
-			klog.Infof("waiting for cspi %s to come to ONLINE state", cspiObj.Name)
-			cspiObj, err1 = c.OpenebsClientset.CstorV1().
-				CStorPoolInstances(c.OpenebsNamespace).
-				Get(cspiObj.Name, metav1.GetOptions{})
-			if err1 != nil {
-				return err1
-			}
-			if cspiObj.Status.Phase != "ONLINE" {
-				return errors.Errorf("failed to verify cspi %s phase expected: ONLINE got: %s",
-					cspiObj.Name, cspiObj.Status.Phase)
-			}
-			return nil
-		})
-	if err != nil {
-		return err
-	}
+	for {
+		cspiObj, err1 = c.OpenebsClientset.CstorV1().
+			CStorPoolInstances(c.OpenebsNamespace).
+			Get(cspiObj.Name, metav1.GetOptions{})
+		if err1 != nil {
+			klog.Errorf("failed to get cspi %s: %s", cspiObj.Name, err1.Error())
+		} else {
+			if cspiObj.Status.Phase == "ONLINE" {
+				break
+			}
+			klog.Infof("waiting for cspi %s to come to ONLINE state, got %s",
+				cspiObj.Name, cspiObj.Status.Phase)
+		}
+		time.Sleep(10 * time.Second)
+	}
 	err = c.updateCVRsLabels(cspObj, cspiObj)
 	if err != nil {
@@ -354,25 +353,23 @@ func (c *CSPCMigrator) scaleDownDeployment(cspObj *apis.CStorPool, openebsNamespace string) error {
 	if err != nil {
 		return err
 	}
-	err = retry.
-		Times(60).
-		Wait(5 * time.Second).
-		Try(func(attempt uint) error {
-			klog.Infof("waiting for csp %s deployment to scale down", cspObj.Name)
-			cspPods, err1 := c.KubeClientset.CoreV1().
-				Pods(openebsNamespace).
-				List(metav1.ListOptions{
-					LabelSelector: "openebs.io/cstor-pool=" + cspObj.Name,
-				})
-			if err1 != nil {
-				return errors.Wrapf(err1, "failed to get csp deploy")
-			}
-			if len(cspPods.Items) != 0 {
-				return errors.Errorf("failed to scale down csp deployment")
-			}
-			return nil
-		})
-	return err
+	for {
+		cspPods, err1 := c.KubeClientset.CoreV1().
+			Pods(openebsNamespace).
+			List(metav1.ListOptions{
+				LabelSelector: "openebs.io/cstor-pool=" + cspObj.Name,
+			})
+		if err1 != nil {
+			klog.Errorf("failed to list pods for csp %s deployment: %s", cspObj.Name, err1.Error())
+		} else {
+			if len(cspPods.Items) == 0 {
+				break
+			}
+			klog.Infof("waiting for csp %s deployment to scale down", cspObj.Name)
+		}
+		time.Sleep(10 * time.Second)
+	}
+	return nil
 }
 
 // Update the bdc with the cspc labels instead of spc labels to allow
@@ -418,13 +415,13 @@ func (c *CSPCMigrator) updateBDCOwnerRef() error {
 		return err
 	}
 	for _, bdcItem := range bdcList.Items {
-		if bdcItem.OwnerReferences[0].Kind != "CStorPoolCluster" {
+		if bdcItem.OwnerReferences[0].Kind != cspcKind {
 			bdcItem := bdcItem // pin it
 			bdcObj := &bdcItem
 			klog.Infof("Updating bdc %s with cspc %s ownerRef.", bdcObj.Name, c.CSPCObj.Name)
 			bdcObj.OwnerReferences = []metav1.OwnerReference{
 				*metav1.NewControllerRef(c.CSPCObj,
-					apis.SchemeGroupVersion.WithKind(c.CSPCObj.Kind)),
+					cstor.SchemeGroupVersion.WithKind(cspcKind)),
 			}
 			_, err := c.OpenebsClientset.OpenebsV1alpha1().BlockDeviceClaims(c.OpenebsNamespace).
 				Update(bdcObj)
@@ -466,3 +463,16 @@ func (c *CSPCMigrator) updateCVRsLabels(cspObj *apis.CStorPool, cspiObj *cstor.CStorPoolInstance) error {
 	}
 	return nil
 }
+
+func addSkipAnnotationToSPC(spcObj *apis.StoragePoolClaim) error {
+retry:
+	spcObj.Annotations = map[string]string{
+		"openebs.io/skip-validations": "true",
+	}
+	_, err := spc.NewKubeClient().Update(spcObj)
+	if k8serrors.IsConflict(err) {
+		klog.Errorf("failed to update spc with skip-validation annotation due to conflict error")
+		// Refetch the SPC before retrying: re-submitting the stale object
+		// would hit the same resourceVersion conflict indefinitely.
+		spcObj, err = spc.NewKubeClient().Get(spcObj.Name, metav1.GetOptions{})
+		if err != nil {
+			return err
+		}
+		goto retry
+	}
+	return err
+}