diff --git a/cmd/base.go b/cmd/base.go deleted file mode 100644 index cd50663..0000000 --- a/cmd/base.go +++ /dev/null @@ -1,739 +0,0 @@ -package cmd - -import ( - "database/sql" - "encoding/json" - "errors" - "fmt" - "os" - "strconv" - - "github.com/cloud-barista/cm-data-mold/config" - "github.com/cloud-barista/cm-data-mold/internal/logformatter" - "github.com/cloud-barista/cm-data-mold/pkg/nrdbms/awsdnmdb" - "github.com/cloud-barista/cm-data-mold/pkg/nrdbms/gcpfsdb" - "github.com/cloud-barista/cm-data-mold/pkg/nrdbms/ncpmgdb" - "github.com/cloud-barista/cm-data-mold/pkg/objectstorage/gcsfs" - "github.com/cloud-barista/cm-data-mold/pkg/objectstorage/s3fs" - "github.com/cloud-barista/cm-data-mold/pkg/rdbms/mysql" - "github.com/cloud-barista/cm-data-mold/pkg/utils" - "github.com/cloud-barista/cm-data-mold/service/nrdbc" - "github.com/cloud-barista/cm-data-mold/service/osc" - "github.com/cloud-barista/cm-data-mold/service/rdbc" - _ "github.com/go-sql-driver/mysql" - "github.com/sirupsen/logrus" - "github.com/spf13/cobra" -) - -func preRun(task string) func(cmd *cobra.Command, args []string) { - return func(cmd *cobra.Command, args []string) { - logrus.SetFormatter(&logformatter.CustomTextFormatter{CmdName: cmd.Parent().Use, JobName: task}) - logrus.Infof("launch an %s to %s", cmd.Parent().Use, task) - err := preRunE(cmd.Parent().Use, task) - if err != nil { - logrus.Errorf("Pre-check for %s operation errors : %v", task, err) - os.Exit(1) - } - logrus.Infof("successful pre-check %s into %s", cmd.Parent().Use, task) - } -} - -func getSrcOS() (*osc.OSController, error) { - var OSC *osc.OSController - logrus.Infof("Provider : %s", cSrcProvider) - if cSrcProvider == "aws" { - logrus.Infof("AccessKey : %s", cSrcAccessKey) - logrus.Infof("SecretKey : %s", cSrcSecretKey) - logrus.Infof("Region : %s", cSrcRegion) - logrus.Infof("BucketName : %s", cSrcBucketName) - s3c, err := config.NewS3Client(cSrcAccessKey, cSrcSecretKey, cSrcRegion) - if err != nil { - return nil, fmt.Errorf("NewS3Client error : %v", err) - } - - OSC, err = osc.New(s3fs.New(utils.AWS, s3c, cSrcBucketName, cSrcRegion), osc.WithLogger(logrus.StandardLogger())) - if err != nil { - return nil, fmt.Errorf("osc error : %v", err) - } - } else if cSrcProvider == "gcp" { - logrus.Infof("CredentialsFilePath : %s", cSrcGcpCredPath) - logrus.Infof("ProjectID : %s", cSrcProjectID) - logrus.Infof("Region : %s", cSrcRegion) - logrus.Infof("BucketName : %s", cSrcBucketName) - gc, err := config.NewGCSClient(cSrcGcpCredPath) - if err != nil { - return nil, fmt.Errorf("NewGCSClient error : %v", err) - } - - OSC, err = osc.New(gcsfs.New(gc, cSrcProjectID, cSrcBucketName, cSrcRegion), osc.WithLogger(logrus.StandardLogger())) - if err != nil { - return nil, fmt.Errorf("osc error : %v", err) - } - } else if cSrcProvider == "ncp" { - logrus.Infof("AccessKey : %s", cSrcAccessKey) - logrus.Infof("SecretKey : %s", cSrcSecretKey) - logrus.Infof("Endpoint : %s", cSrcEndpoint) - logrus.Infof("Region : %s", cSrcRegion) - logrus.Infof("BucketName : %s", cSrcBucketName) - s3c, err := config.NewS3ClientWithEndpoint(cSrcAccessKey, cSrcSecretKey, cSrcRegion, cSrcEndpoint) - if err != nil { - return nil, fmt.Errorf("NewS3ClientWithEndpint error : %v", err) - } - - OSC, err = osc.New(s3fs.New(utils.AWS, s3c, cSrcBucketName, cSrcRegion), osc.WithLogger(logrus.StandardLogger())) - if err != nil { - return nil, fmt.Errorf("osc error : %v", err) - } - } - return OSC, nil -} - -func getDstOS() (*osc.OSController, error) { - var OSC *osc.OSController - 
logrus.Infof("Provider : %s", cDstProvider) - if cDstProvider == "aws" { - logrus.Infof("AccessKey : %s", cDstAccessKey) - logrus.Infof("SecretKey : %s", cDstSecretKey) - logrus.Infof("Region : %s", cDstRegion) - logrus.Infof("BucketName : %s", cDstBucketName) - s3c, err := config.NewS3Client(cDstAccessKey, cDstSecretKey, cDstRegion) - if err != nil { - return nil, fmt.Errorf("NewS3Client error : %v", err) - } - - OSC, err = osc.New(s3fs.New(utils.AWS, s3c, cDstBucketName, cDstRegion), osc.WithLogger(logrus.StandardLogger())) - if err != nil { - return nil, fmt.Errorf("osc error : %v", err) - } - } else if cDstProvider == "gcp" { - logrus.Infof("CredentialsFilePath : %s", cDstGcpCredPath) - logrus.Infof("ProjectID : %s", cDstProjectID) - logrus.Infof("Region : %s", cDstRegion) - logrus.Infof("BucketName : %s", cDstBucketName) - gc, err := config.NewGCSClient(cDstGcpCredPath) - if err != nil { - return nil, fmt.Errorf("NewGCSClient error : %v", err) - } - - OSC, err = osc.New(gcsfs.New(gc, cDstProjectID, cDstBucketName, cDstRegion), osc.WithLogger(logrus.StandardLogger())) - if err != nil { - return nil, fmt.Errorf("osc error : %v", err) - } - } else if cDstProvider == "ncp" { - logrus.Infof("AccessKey : %s", cDstAccessKey) - logrus.Infof("SecretKey : %s", cDstSecretKey) - logrus.Infof("Endpoint : %s", cDstEndpoint) - logrus.Infof("Region : %s", cDstRegion) - logrus.Infof("BucketName : %s", cDstBucketName) - s3c, err := config.NewS3ClientWithEndpoint(cDstAccessKey, cDstSecretKey, cDstRegion, cDstEndpoint) - if err != nil { - return nil, fmt.Errorf("NewS3ClientWithEndpint error : %v", err) - } - - OSC, err = osc.New(s3fs.New(utils.AWS, s3c, cDstBucketName, cDstRegion), osc.WithLogger(logrus.StandardLogger())) - if err != nil { - return nil, fmt.Errorf("osc error : %v", err) - } - } - return OSC, nil -} - -func getSrcRDMS() (*rdbc.RDBController, error) { - logrus.Infof("Provider : %s", cSrcProvider) - logrus.Infof("Username : %s", cSrcUsername) - logrus.Infof("Password : %s", cSrcPassword) - logrus.Infof("Host : %s", cSrcHost) - logrus.Infof("Port : %s", cSrcPort) - src, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:%s)/", cSrcUsername, cSrcPassword, cSrcHost, cSrcPort)) - if err != nil { - return nil, err - } - return rdbc.New(mysql.New(utils.Provider(cSrcProvider), src), rdbc.WithLogger(logrus.StandardLogger())) -} - -func getDstRDMS() (*rdbc.RDBController, error) { - logrus.Infof("Provider : %s", cDstProvider) - logrus.Infof("Username : %s", cDstUsername) - logrus.Infof("Password : %s", cDstPassword) - logrus.Infof("Host : %s", cDstHost) - logrus.Infof("Port : %s", cDstPort) - dst, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:%s)/", cDstUsername, cDstPassword, cDstHost, cDstPort)) - if err != nil { - return nil, err - } - return rdbc.New(mysql.New(utils.Provider(cDstProvider), dst), rdbc.WithLogger(logrus.StandardLogger())) -} - -func getSrcNRDMS() (*nrdbc.NRDBController, error) { - - var NRDBC *nrdbc.NRDBController - logrus.Infof("Provider : %s", cSrcProvider) - if cSrcProvider == "aws" { - logrus.Infof("AccessKey : %s", cSrcAccessKey) - logrus.Infof("SecretKey : %s", cSrcSecretKey) - logrus.Infof("Region : %s", cSrcRegion) - awsnrdb, err := config.NewDynamoDBClient(cSrcAccessKey, cSrcSecretKey, cSrcRegion) - if err != nil { - return nil, err - } - - NRDBC, err = nrdbc.New(awsdnmdb.New(awsnrdb, cSrcRegion), nrdbc.WithLogger(logrus.StandardLogger())) - if err != nil { - return nil, err - } - } else if cSrcProvider == "gcp" { - logrus.Infof("CredentialsFilePath : %s", 
cSrcGcpCredPath) - logrus.Infof("ProjectID : %s", cSrcProjectID) - logrus.Infof("Region : %s", cSrcRegion) - gcpnrdb, err := config.NewFireStoreClient(cSrcGcpCredPath, cSrcProjectID) - if err != nil { - return nil, err - } - - NRDBC, err = nrdbc.New(gcpfsdb.New(gcpnrdb, cSrcRegion), nrdbc.WithLogger(logrus.StandardLogger())) - if err != nil { - return nil, err - } - } else if cSrcProvider == "ncp" { - logrus.Infof("Username : %s", cSrcUsername) - logrus.Infof("Password : %s", cSrcPassword) - logrus.Infof("Host : %s", cSrcHost) - logrus.Infof("Port : %s", cSrcPort) - port, err := strconv.Atoi(cSrcPort) - if err != nil { - return nil, err - } - - ncpnrdb, err := config.NewNCPMongoDBClient(cSrcUsername, cSrcPassword, cSrcHost, port) - if err != nil { - return nil, err - } - - NRDBC, err = nrdbc.New(ncpmgdb.New(ncpnrdb, cSrcDBName), nrdbc.WithLogger(logrus.StandardLogger())) - if err != nil { - return nil, err - } - } - return NRDBC, nil -} - -func getDstNRDMS() (*nrdbc.NRDBController, error) { - var NRDBC *nrdbc.NRDBController - logrus.Infof("Provider : %s", cDstProvider) - if cDstProvider == "aws" { - logrus.Infof("AccessKey : %s", cDstAccessKey) - logrus.Infof("SecretKey : %s", cDstSecretKey) - logrus.Infof("Region : %s", cDstRegion) - awsnrdb, err := config.NewDynamoDBClient(cDstAccessKey, cDstSecretKey, cDstRegion) - if err != nil { - return nil, err - } - - NRDBC, err = nrdbc.New(awsdnmdb.New(awsnrdb, cDstRegion), nrdbc.WithLogger(logrus.StandardLogger())) - if err != nil { - return nil, err - } - } else if cDstProvider == "gcp" { - logrus.Infof("CredentialsFilePath : %s", cDstGcpCredPath) - logrus.Infof("ProjectID : %s", cDstProjectID) - logrus.Infof("Region : %s", cDstRegion) - gcpnrdb, err := config.NewFireStoreClient(cDstGcpCredPath, cDstProjectID) - if err != nil { - return nil, err - } - - NRDBC, err = nrdbc.New(gcpfsdb.New(gcpnrdb, cDstRegion), nrdbc.WithLogger(logrus.StandardLogger())) - if err != nil { - return nil, err - } - } else if cDstProvider == "ncp" { - logrus.Infof("Username : %s", cDstUsername) - logrus.Infof("Password : %s", cDstPassword) - logrus.Infof("Host : %s", cDstHost) - logrus.Infof("Port : %s", cDstPort) - port, err := strconv.Atoi(cDstPort) - if err != nil { - return nil, err - } - - ncpnrdb, err := config.NewNCPMongoDBClient(cDstUsername, cDstPassword, cDstHost, port) - if err != nil { - return nil, err - } - - NRDBC, err = nrdbc.New(ncpmgdb.New(ncpnrdb, cDstDBName), nrdbc.WithLogger(logrus.StandardLogger())) - if err != nil { - return nil, err - } - } - return NRDBC, nil -} - -func getConfig(credPath string) error { - data, err := os.ReadFile(credPath) - if err != nil { - return err - } - - err = json.Unmarshal(data, &configData) - if err != nil { - return err - } - return nil -} - -func preRunE(pName string, cmdName string) error { - logrus.Info("initiate a configuration scan") - if err := getConfig(credentialPath); err != nil { - return fmt.Errorf("get config error : %s", err) - } - - if cmdName == "objectstorage" { - if value, ok := configData["objectstorage"]; ok { - if !taskTarget { - if src, ok := value["src"]; ok { - if err := applyOSValue(src, "src"); err != nil { - return err - } - } - } else { - if dst, ok := value["dst"]; ok { - if err := applyOSValue(dst, "dst"); err != nil { - return err - } - } - } - } else { - return errors.New("does not exist objectstorage") - } - - if pName != "migration" && pName != "delete" { - if err := utils.IsDir(dstPath); err != nil { - return errors.New("dstPath error") - } - } else if pName == "migration" { - if 
value, ok := configData["objectstorage"]; ok { - if !taskTarget { - if dst, ok := value["dst"]; ok { - if err := applyOSValue(dst, "dst"); err != nil { - return err - } - } - } else { - if src, ok := value["src"]; ok { - if err := applyOSValue(src, "src"); err != nil { - return err - } - } - } - } else { - return errors.New("does not exist objectstorage dst") - } - } - } else if cmdName == "rdbms" { - if value, ok := configData["rdbms"]; ok { - if !taskTarget { - if src, ok := value["src"]; ok { - if err := applyRDMValue(src, "src"); err != nil { - return err - } - } - } else { - if value, ok := configData["rdbms"]; ok { - if dst, ok := value["dst"]; ok { - return applyRDMValue(dst, "dst") - } - } - } - } else { - return errors.New("does not exist rdbms src") - } - - if pName != "migration" && pName != "delete" { - if err := utils.IsDir(dstPath); err != nil { - return errors.New("dstPath error") - } - } else if pName == "migration" { - if value, ok := configData["rdbms"]; ok { - if !taskTarget { - if value, ok := configData["rdbms"]; ok { - if dst, ok := value["dst"]; ok { - return applyRDMValue(dst, "dst") - } - } - } else { - if src, ok := value["src"]; ok { - if err := applyRDMValue(src, "src"); err != nil { - return err - } - } - } - } else { - return errors.New("does not exist rdbms dst") - } - } - } else if cmdName == "nrdbms" { - if value, ok := configData["nrdbms"]; ok { - if !taskTarget { - if src, ok := value["src"]; ok { - if err := applyNRDMValue(src, "src"); err != nil { - return err - } - } - } else { - if dst, ok := value["dst"]; ok { - if err := applyNRDMValue(dst, "dst"); err != nil { - return err - } - } - } - } else { - return errors.New("does not exist nrdbms src") - } - - if pName != "migration" && pName != "delete" { - if err := utils.IsDir(dstPath); err != nil { - return errors.New("dstPath error") - } - } else if pName == "migration" { - if value, ok := configData["nrdbms"]; ok { - if !taskTarget { - if value, ok := configData["nrdbms"]; ok { - if dst, ok := value["dst"]; ok { - return applyNRDMValue(dst, "dst") - } - } - } else { - if src, ok := value["src"]; ok { - if err := applyNRDMValue(src, "src"); err != nil { - return err - } - } - } - } else { - return errors.New("does not exist nrdbms dst") - } - } - } - return nil -} - -func applyNRDMValue(src map[string]string, p string) error { - provider, ok := src["provider"] - if ok { - if provider != "aws" && provider != "gcp" && provider != "ncp" { - return fmt.Errorf("provider[aws,gcp,ncp] error : %s", provider) - } - } else { - return errors.New("does not exist provider") - } - - if p == "src" { - cSrcProvider = provider - } else { - cDstProvider = provider - } - - if provider == "aws" { - access, ok := src["assessKey"] - if !ok { - return errors.New("does not exist assessKey") - } - - if p == "src" { - cSrcAccessKey = access - } else { - cDstAccessKey = access - } - - secret, ok := src["secretKey"] - if !ok { - return errors.New("does not exist secretKey") - } - - if p == "src" { - cSrcSecretKey = secret - } else { - cDstSecretKey = secret - } - - region, ok := src["region"] - if !ok { - return errors.New("does not exist region") - } - - if p == "src" { - cSrcRegion = region - } else { - cDstRegion = region - } - } else if provider == "gcp" { - cred, ok := src["gcpCredPath"] - if !ok { - return errors.New("does not exist gcpCredPath") - } - if p == "src" { - cSrcGcpCredPath = cred - } else { - cDstGcpCredPath = cred - } - - projectID, ok := src["projectID"] - if !ok { - return errors.New("does not exist 
projectID") - } - if p == "src" { - cSrcProjectID = projectID - } else { - cDstProjectID = projectID - } - - region, ok := src["region"] - if !ok { - return errors.New("does not exist region") - } - - if p == "src" { - cSrcRegion = region - } else { - cDstRegion = region - } - } else if provider == "ncp" { - username, ok := src["username"] - if !ok { - return errors.New("does not exist username") - } - - if p == "src" { - cSrcUsername = username - } else { - cDstUsername = username - } - - password, ok := src["password"] - if !ok { - return errors.New("does not exist password") - } - - if p == "src" { - cSrcPassword = password - } else { - cDstPassword = password - } - - host, ok := src["host"] - if !ok { - return errors.New("does not exist host") - } - - if p == "src" { - cSrcHost = host - } else { - cDstHost = host - } - - port, ok := src["port"] - if !ok { - return errors.New("does not exist port") - } - - if p == "src" { - cSrcPort = port - } else { - cDstPort = port - } - - DBName, ok := src["databaseName"] - if !ok { - return errors.New("does not exist databaseName") - } - - if p == "src" { - cSrcDBName = DBName - } else { - cDstDBName = DBName - } - } - return nil -} - -func applyRDMValue(src map[string]string, p string) error { - provider, ok := src["provider"] - if ok { - if provider != "aws" && provider != "gcp" && provider != "ncp" { - return fmt.Errorf("provider[aws,gcp,ncp] error : %s", provider) - } - } else { - return errors.New("does not exist provider") - } - - if p == "src" { - cSrcProvider = provider - } else { - cDstProvider = provider - } - - username, ok := src["username"] - if !ok { - return errors.New("does not exist username") - } - - if p == "src" { - cSrcUsername = username - } else { - cDstUsername = username - } - - password, ok := src["password"] - if !ok { - return errors.New("does not exist password") - } - - if p == "src" { - cSrcPassword = password - } else { - cDstPassword = password - } - - host, ok := src["host"] - if !ok { - return errors.New("does not exist host") - } - - if p == "src" { - cSrcHost = host - } else { - cDstHost = host - } - - port, ok := src["port"] - if !ok { - return errors.New("does not exist port") - } - - if p == "src" { - cSrcPort = port - } else { - cDstPort = port - } - - return nil -} - -func applyOSValue(src map[string]string, p string) error { - provider, ok := src["provider"] - if ok { - if provider != "aws" && provider != "gcp" && provider != "ncp" { - return fmt.Errorf("provider[aws,gcp,ncp] error : %s", provider) - } - } else { - return errors.New("does not exist provider") - } - - if p == "src" { - cSrcProvider = provider - } else { - cDstProvider = provider - } - - if provider == "aws" || provider == "ncp" { - access, ok := src["assessKey"] - if !ok { - return errors.New("does not exist assessKey") - } - - if p == "src" { - cSrcAccessKey = access - } else { - cDstAccessKey = access - } - - secret, ok := src["secretKey"] - if !ok { - return errors.New("does not exist secretKey") - } - - if p == "src" { - cSrcSecretKey = secret - } else { - cDstSecretKey = secret - } - - region, ok := src["region"] - if !ok { - return errors.New("does not exist region") - } - - if p == "src" { - cSrcRegion = region - } else { - cDstRegion = region - } - - bktName, ok := src["bucketName"] - if !ok { - return errors.New("does not exist bucketName") - } - - if p == "src" { - cSrcBucketName = bktName - } else { - cDstBucketName = bktName - } - - if provider == "ncp" { - endpoint, ok := src["endpoint"] - if !ok { - return errors.New("does not 
exist endpoint") - } - if p == "src" { - cSrcEndpoint = endpoint - } else { - cDstEndpoint = endpoint - } - } - } - - if provider == "gcp" { - cred, ok := src["gcpCredPath"] - if !ok { - return errors.New("does not exist gcpCredPath") - } - if p == "src" { - cSrcGcpCredPath = cred - } else { - cDstGcpCredPath = cred - } - - projectID, ok := src["projectID"] - if !ok { - return errors.New("does not exist projectID") - } - if p == "src" { - cSrcProjectID = projectID - } else { - cDstProjectID = projectID - } - - region, ok := src["region"] - if !ok { - return errors.New("does not exist region") - } - if p == "src" { - cSrcRegion = region - } else { - cDstRegion = region - } - - bktName, ok := src["bucketName"] - if !ok { - return errors.New("does not exist bucketName") - } - if p == "src" { - cSrcBucketName = bktName - } else { - cDstBucketName = bktName - } - } - return nil -} diff --git a/cmd/create.go b/cmd/create.go index 5e65da3..3f62725 100644 --- a/cmd/create.go +++ b/cmd/create.go @@ -16,10 +16,8 @@ limitations under the License. package cmd import ( - "github.com/cloud-barista/cm-data-mold/internal/logformatter" - "github.com/cloud-barista/cm-data-mold/pkg/dummy/semistructed" - "github.com/cloud-barista/cm-data-mold/pkg/dummy/structed" - "github.com/cloud-barista/cm-data-mold/pkg/dummy/unstructed" + "github.com/cloud-barista/cm-data-mold/internal/execfunc" + "github.com/cloud-barista/cm-data-mold/internal/log" "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) @@ -37,96 +35,26 @@ Unstructured data: png,gif,txt,zip Semi-structured data: json, xml You must enter the data size in GB.`, - RunE: func(_ *cobra.Command, _ []string) error { - logrus.SetFormatter(&logformatter.CustomTextFormatter{CmdName: "create", JobName: "dummy create"}) - logrus.Info("check directory paths") - if sqlSize != 0 { - logrus.Info("start sql generation") - if err := structed.GenerateRandomSQL(dstPath, sqlSize); err != nil { - logrus.Error("failed to generate sql") - return err - } - logrus.Infof("successfully generated sql : %s", dstPath) + Run: func(_ *cobra.Command, _ []string) { + logrus.SetFormatter(&log.CustomTextFormatter{CmdName: "create", JobName: "dummy create"}) + if err := execfunc.DummyCreate(datamoldParams); err != nil { + logrus.Errorf("dummy create failed : %v", err) } - - if csvSize != 0 { - logrus.Info("start csv generation") - if err := structed.GenerateRandomCSV(dstPath, csvSize); err != nil { - logrus.Error("failed to generate csv") - return err - } - logrus.Infof("successfully generated csv : %s", dstPath) - } - - if jsonSize != 0 { - logrus.Info("start json generation") - if err := semistructed.GenerateRandomJSON(dstPath, jsonSize); err != nil { - logrus.Error("failed to generate json") - return err - } - logrus.Infof("successfully generated json : %s", dstPath) - } - - if xmlSize != 0 { - logrus.Info("start xml generation") - if err := semistructed.GenerateRandomXML(dstPath, xmlSize); err != nil { - logrus.Error("failed to generate xml") - return err - } - logrus.Infof("successfully generated xml : %s", dstPath) - } - - if txtSize != 0 { - logrus.Info("start txt generation") - if err := unstructed.GenerateRandomTXT(dstPath, txtSize); err != nil { - logrus.Error("failed to generate txt") - return err - } - logrus.Infof("successfully generated txt : %s", dstPath) - } - - if pngSize != 0 { - logrus.Info("start png generation") - if err := unstructed.GenerateRandomPNGImage(dstPath, pngSize); err != nil { - logrus.Error("failed to generate png") - return err - } - 
logrus.Infof("successfully generated png : %s", dstPath) - } - - if gifSize != 0 { - logrus.Info("start gif generation") - if err := unstructed.GenerateRandomGIF(dstPath, gifSize); err != nil { - logrus.Error("failed to generate gif") - return err - } - logrus.Infof("successfully generated gif : %s", dstPath) - } - - if zipSize != 0 { - logrus.Info("start zip generation") - if err := unstructed.GenerateRandomZIP(dstPath, zipSize); err != nil { - logrus.Error("failed to generate zip") - return err - } - logrus.Infof("successfully generated zip : %s", dstPath) - } - return nil }, } func init() { rootCmd.AddCommand(createCmd) - createCmd.Flags().StringVarP(&dstPath, "dst-path", "d", "", "Directory path to create dummy data") + createCmd.Flags().StringVarP(&datamoldParams.DstPath, "dst-path", "d", "", "Directory path to create dummy data") createCmd.MarkFlagRequired("dst-path") - createCmd.Flags().IntVarP(&sqlSize, "sql-size", "s", 0, "Total size of sql files") - createCmd.Flags().IntVarP(&csvSize, "csv-size", "c", 0, "Total size of csv files") - createCmd.Flags().IntVarP(&jsonSize, "json-size", "j", 0, "Total size of json files") - createCmd.Flags().IntVarP(&xmlSize, "xml-size", "x", 0, "Total size of xml files") - createCmd.Flags().IntVarP(&txtSize, "txt-size", "t", 0, "Total size of txt files") - createCmd.Flags().IntVarP(&pngSize, "png-size", "p", 0, "Total size of png files") - createCmd.Flags().IntVarP(&gifSize, "gif-size", "g", 0, "Total size of gif files") - createCmd.Flags().IntVarP(&zipSize, "zip-size", "z", 0, "Total size of zip files") + createCmd.Flags().IntVarP(&datamoldParams.SqlSize, "sql-size", "s", 0, "Total size of sql files") + createCmd.Flags().IntVarP(&datamoldParams.CsvSize, "csv-size", "c", 0, "Total size of csv files") + createCmd.Flags().IntVarP(&datamoldParams.JsonSize, "json-size", "j", 0, "Total size of json files") + createCmd.Flags().IntVarP(&datamoldParams.XmlSize, "xml-size", "x", 0, "Total size of xml files") + createCmd.Flags().IntVarP(&datamoldParams.TxtSize, "txt-size", "t", 0, "Total size of txt files") + createCmd.Flags().IntVarP(&datamoldParams.PngSize, "png-size", "p", 0, "Total size of png files") + createCmd.Flags().IntVarP(&datamoldParams.GifSize, "gif-size", "g", 0, "Total size of gif files") + createCmd.Flags().IntVarP(&datamoldParams.ZipSize, "zip-size", "z", 0, "Total size of zip files") } diff --git a/cmd/delete.go b/cmd/delete.go index f10368f..2f873ed 100644 --- a/cmd/delete.go +++ b/cmd/delete.go @@ -18,7 +18,7 @@ package cmd import ( "os" - "github.com/cloud-barista/cm-data-mold/internal/logformatter" + "github.com/cloud-barista/cm-data-mold/internal/log" "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) @@ -34,16 +34,16 @@ which are CSP or local dummy data`, var deleteDummyCmd = &cobra.Command{ Use: "dummy", - RunE: func(cmd *cobra.Command, args []string) error { - logrus.SetFormatter(&logformatter.CustomTextFormatter{CmdName: "delete"}) + Run: func(cmd *cobra.Command, args []string) { + logrus.SetFormatter(&log.CustomTextFormatter{CmdName: "delete"}) logrus.WithFields(logrus.Fields{"jobName": "dummy delete"}).Info("start deleting dummy") - err := os.RemoveAll(dstPath) - if err != nil { + + if err := os.RemoveAll(datamoldParams.DstPath); err != nil { logrus.WithFields(logrus.Fields{"jobName": "dummy delete"}).Errorf("failed to delete dummy : %v", err) - return err + return } - logrus.Infof("successfully deleted : %s\n", dstPath) - return nil + logrus.Infof("successfully deleted : %s\n", datamoldParams.DstPath) + return }, } @@ -51,7 
+51,7 @@ func init() { rootCmd.AddCommand(deleteCmd) deleteCmd.AddCommand(deleteDummyCmd) - deleteCmd.PersistentFlags().BoolVarP(&taskTarget, "task", "T", false, "Select a destination(src, dst) to work with in the credential-path") - deleteDummyCmd.Flags().StringVarP(&dstPath, "dst-path", "d", "", "Delete data in directory paths") + deleteCmd.PersistentFlags().BoolVarP(&datamoldParams.TaskTarget, "task", "T", false, "Select a destination(src, dst) to work with in the credential-path") + deleteDummyCmd.Flags().StringVarP(&datamoldParams.DstPath, "dst-path", "d", "", "Delete data in directory paths") deleteDummyCmd.MarkFlagRequired("dst-path") } diff --git a/cmd/export.go b/cmd/export.go index 50f2fe8..7c57943 100644 --- a/cmd/export.go +++ b/cmd/export.go @@ -29,8 +29,8 @@ var exportCmd = &cobra.Command{ func init() { rootCmd.AddCommand(exportCmd) - exportCmd.PersistentFlags().StringVarP(&credentialPath, "credential-path", "C", "", "Json file path containing the user's credentials") - exportCmd.PersistentFlags().StringVarP(&dstPath, "dst-path", "d", "", "Directory path to export data") - exportCmd.PersistentFlags().BoolVarP(&taskTarget, "task", "T", false, "Select a destination(src, dst) to work with in the credential-path") + exportCmd.PersistentFlags().StringVarP(&datamoldParams.CredentialPath, "credential-path", "C", "", "Json file path containing the user's credentials") + exportCmd.PersistentFlags().StringVarP(&datamoldParams.DstPath, "dst-path", "d", "", "Directory path to export data") + exportCmd.PersistentFlags().BoolVarP(&datamoldParams.TaskTarget, "task", "T", false, "Select a destination(src, dst) to work with in the credential-path") exportCmd.MarkFlagsRequiredTogether("credential-path", "dst-path") } diff --git a/cmd/import.go b/cmd/import.go index e0fa96c..ff33bc7 100644 --- a/cmd/import.go +++ b/cmd/import.go @@ -29,8 +29,8 @@ var importCmd = &cobra.Command{ func init() { rootCmd.AddCommand(importCmd) - importCmd.PersistentFlags().StringVarP(&credentialPath, "credential-path", "C", "", "Json file path containing the user's credentials") - importCmd.PersistentFlags().StringVarP(&dstPath, "dst-path", "d", "", "Destination path where dummy data exists") - importCmd.PersistentFlags().BoolVarP(&taskTarget, "task", "T", false, "Select a destination(src, dst) to work with in the credential-path") + importCmd.PersistentFlags().StringVarP(&datamoldParams.CredentialPath, "credential-path", "C", "", "Json file path containing the user's credentials") + importCmd.PersistentFlags().StringVarP(&datamoldParams.DstPath, "dst-path", "d", "", "Destination path where dummy data exists") + importCmd.PersistentFlags().BoolVarP(&datamoldParams.TaskTarget, "task", "T", false, "Select a destination(src, dst) to work with in the credential-path") importCmd.MarkFlagsRequiredTogether("credential-path", "dst-path") } diff --git a/cmd/migration.go b/cmd/migration.go index 71ff82d..592f254 100644 --- a/cmd/migration.go +++ b/cmd/migration.go @@ -28,7 +28,7 @@ var migrationCmd = &cobra.Command{ func init() { rootCmd.AddCommand(migrationCmd) - migrationCmd.PersistentFlags().BoolVarP(&taskTarget, "task", "T", false, "Select a destination(src, dst) to work with in the credential-path") - migrationCmd.PersistentFlags().StringVarP(&credentialPath, "credential-path", "C", "", "Json file path containing the user's credentials") + migrationCmd.PersistentFlags().BoolVarP(&datamoldParams.TaskTarget, "task", "T", false, "Select a destination(src, dst) to work with in the credential-path") + 
migrationCmd.PersistentFlags().StringVarP(&datamoldParams.CredentialPath, "credential-path", "C", "", "Json file path containing the user's credentials") migrationCmd.MarkFlagRequired("credential-path") } diff --git a/cmd/nrdbms.go b/cmd/nrdbms.go index 3200a79..6e67f9f 100644 --- a/cmd/nrdbms.go +++ b/cmd/nrdbms.go @@ -16,14 +16,9 @@ limitations under the License. package cmd import ( - "encoding/json" - "fmt" - "io/fs" "os" - "path/filepath" - "github.com/cloud-barista/cm-data-mold/service/nrdbc" - "github.com/sirupsen/logrus" + "github.com/cloud-barista/cm-data-mold/internal/auth" "github.com/spf13/cobra" ) @@ -31,39 +26,39 @@ import ( var importNRDBCmd = &cobra.Command{ Use: "nrdbms", Aliases: []string{"ndb"}, - PreRun: preRun("nrdbms"), Run: func(cmd *cobra.Command, args []string) { - if err := importNRDMFunc(); err != nil { + auth.PreRun("nrdbms", &datamoldParams, cmd.Parent().Use) + if err := auth.ImportNRDMFunc(&datamoldParams); err != nil { os.Exit(1) } }, } var exportNRDBCmd = &cobra.Command{ - Use: "nrdbms", - PreRun: preRun("nrdbms"), + Use: "nrdbms", Run: func(cmd *cobra.Command, args []string) { - if err := exportNRDMFunc(); err != nil { + auth.PreRun("nrdbms", &datamoldParams, cmd.Parent().Use) + if err := auth.ExportNRDMFunc(&datamoldParams); err != nil { os.Exit(1) } }, } var migrationNRDBCmd = &cobra.Command{ - Use: "nrdbms", - PreRun: preRun("nrdbms"), + Use: "nrdbms", Run: func(cmd *cobra.Command, args []string) { - if err := migrationNRDMFunc(); err != nil { + auth.PreRun("nrdbms", &datamoldParams, cmd.Parent().Use) + if err := auth.MigrationNRDMFunc(&datamoldParams); err != nil { os.Exit(1) } }, } var deleteNRDBMSCmd = &cobra.Command{ - Use: "nrdbms", - PreRun: preRun("nrdbms"), + Use: "nrdbms", Run: func(cmd *cobra.Command, args []string) { - if err := deleteNRDMFunc(); err != nil { + auth.PreRun("nrdbms", &datamoldParams, cmd.Parent().Use) + if err := auth.DeleteNRDMFunc(&datamoldParams); err != nil { os.Exit(1) } }, @@ -75,178 +70,7 @@ func init() { migrationCmd.AddCommand(migrationNRDBCmd) deleteCmd.AddCommand(deleteNRDBMSCmd) - deleteNRDBMSCmd.Flags().StringVarP(&credentialPath, "credential-path", "C", "", "Json file path containing the user's credentials") - deleteNRDBMSCmd.Flags().StringArrayVarP(&deleteTableList, "delete-table-list", "D", []string{}, "List of table names to delete") + deleteNRDBMSCmd.Flags().StringVarP(&datamoldParams.CredentialPath, "credential-path", "C", "", "Json file path containing the user's credentials") + deleteNRDBMSCmd.Flags().StringArrayVarP(&datamoldParams.DeleteTableList, "delete-table-list", "D", []string{}, "List of table names to delete") deleteNRDBMSCmd.MarkFlagsRequiredTogether("credential-path", "delete-table-list") } - -func importNRDMFunc() error { - var NRDBC *nrdbc.NRDBController - var err error - if !taskTarget { - NRDBC, err = getSrcNRDMS() - } else { - NRDBC, err = getDstNRDMS() - } - - if err != nil { - logrus.Errorf("NRDBController error importing into nrdbms : %v", err) - return err - } - - jsonList := []string{} - err = filepath.Walk(dstPath, func(path string, info fs.FileInfo, err error) error { - if err != nil { - return err - } - if filepath.Ext(path) == ".json" { - jsonList = append(jsonList, path) - } - return nil - }) - - if err != nil { - logrus.Errorf("Walk error : %v", err) - return err - } - - var srcData []map[string]interface{} - for _, jsonFile := range jsonList { - srcData = []map[string]interface{}{} - - file, err := os.Open(jsonFile) - if err != nil { - logrus.Errorf("file open error : %v", err) - 
return err - } - defer file.Close() - - if err := json.NewDecoder(file).Decode(&srcData); err != nil { - logrus.Errorf("file decoding error : %v", err) - return err - } - - fileName := filepath.Base(jsonFile) - tableName := fileName[:len(fileName)-len(filepath.Ext(fileName))] - - logrus.Infof("Import start: %s", fileName) - if err := NRDBC.Put(tableName, &srcData); err != nil { - logrus.Error("Put error importing into nrdbms") - return err - } - logrus.Infof("successfully imported : %s", dstPath) - } - return nil -} - -func exportNRDMFunc() error { - var NRDBC *nrdbc.NRDBController - var err error - logrus.Infof("User Information") - if !taskTarget { - NRDBC, err = getSrcNRDMS() - } else { - NRDBC, err = getDstNRDMS() - } - if err != nil { - logrus.Errorf("NRDBController error exporting into rdbms : %v", err) - return err - } - - tableList, err := NRDBC.ListTables() - if err != nil { - logrus.Errorf("ListTables error : %v", err) - return err - } - - var dstData []map[string]interface{} - for _, table := range tableList { - logrus.Infof("Export start: %s", table) - dstData = []map[string]interface{}{} - - if err := NRDBC.Get(table, &dstData); err != nil { - logrus.Errorf("Get error : %v", err) - return err - } - - file, err := os.Create(filepath.Join(dstPath, fmt.Sprintf("%s.json", table))) - if err != nil { - logrus.Errorf("File create error : %v", err) - return err - } - defer file.Close() - - encoder := json.NewEncoder(file) - encoder.SetIndent("", " ") - if err := encoder.Encode(dstData); err != nil { - logrus.Errorf("data encoding error : %v", err) - return err - } - logrus.Infof("successfully exported : %s", file.Name()) - } - logrus.Infof("successfully exported : %s", dstPath) - return nil -} - -func migrationNRDMFunc() error { - var srcNRDBC *nrdbc.NRDBController - var srcErr error - var dstNRDBC *nrdbc.NRDBController - var dstErr error - if !taskTarget { - logrus.Infof("Source Information") - srcNRDBC, srcErr = getSrcNRDMS() - if srcErr != nil { - logrus.Errorf("NRDBController error migration into nrdbms : %v", srcErr) - return srcErr - } - logrus.Infof("Target Information") - dstNRDBC, dstErr = getDstNRDMS() - if dstErr != nil { - logrus.Errorf("NRDBController error migration into nrdbms : %v", dstErr) - return dstErr - } - } else { - logrus.Infof("Source Information") - srcNRDBC, srcErr = getDstNRDMS() - if srcErr != nil { - logrus.Errorf("NRDBController error migration into nrdbms : %v", srcErr) - return srcErr - } - logrus.Infof("Target Information") - dstNRDBC, dstErr = getSrcNRDMS() - if dstErr != nil { - logrus.Errorf("NRDBController error migration into nrdbms : %v", dstErr) - return dstErr - } - } - logrus.Info("Launch NRDBController Copy") - if err := srcNRDBC.Copy(dstNRDBC); err != nil { - logrus.Errorf("Copy error copying into nrdbms : %v", err) - return err - } - logrus.Info("successfully migrationed") - return nil -} - -func deleteNRDMFunc() error { - var NRDBC *nrdbc.NRDBController - var err error - if !taskTarget { - NRDBC, err = getSrcNRDMS() - } else { - NRDBC, err = getDstNRDMS() - } - if err != nil { - logrus.Errorf("NRDBController error deleting into nrdbms : %v", err) - return err - } - - logrus.Info("Launch NRDBController Delete") - if err := NRDBC.DeleteTables(deleteTableList...); err != nil { - logrus.Errorf("Delete error deleting into nrdbms : %v", err) - return err - } - logrus.Info("successfully deleted") - return nil -} diff --git a/cmd/objectstorage.go b/cmd/objectstorage.go index c73e0c3..51772c4 100644 --- a/cmd/objectstorage.go +++ 
b/cmd/objectstorage.go @@ -18,46 +18,45 @@ package cmd import ( "os" - "github.com/cloud-barista/cm-data-mold/service/osc" - "github.com/sirupsen/logrus" + "github.com/cloud-barista/cm-data-mold/internal/auth" "github.com/spf13/cobra" ) var importOSCmd = &cobra.Command{ - Use: "objectstorage", - PreRun: preRun("objectstorage"), + Use: "objectstorage", Run: func(cmd *cobra.Command, args []string) { - if err := importOSFunc(); err != nil { + auth.PreRun("objectstorage", &datamoldParams, cmd.Parent().Use) + if err := auth.ImportOSFunc(&datamoldParams); err != nil { os.Exit(1) } }, } var exportOSCmd = &cobra.Command{ - Use: "objectstorage", - PreRun: preRun("objectstorage"), + Use: "objectstorage", Run: func(cmd *cobra.Command, args []string) { - if err := exportOSFunc(); err != nil { + auth.PreRun("objectstorage", &datamoldParams, cmd.Parent().Use) + if err := auth.ExportOSFunc(&datamoldParams); err != nil { os.Exit(1) } }, } var migrationOSCmd = &cobra.Command{ - Use: "objectstorage", - PreRun: preRun("objectstorage"), + Use: "objectstorage", Run: func(cmd *cobra.Command, args []string) { - if err := migrationOSFunc(); err != nil { + auth.PreRun("objectstorage", &datamoldParams, cmd.Parent().Use) + if err := auth.MigrationOSFunc(&datamoldParams); err != nil { os.Exit(1) } }, } var deleteOSCmd = &cobra.Command{ - Use: "objectstorage", - PreRun: preRun("objectstorage"), + Use: "objectstorage", Run: func(cmd *cobra.Command, args []string) { - if err := deleteOSFunc(); err != nil { + auth.PreRun("objectstorage", &datamoldParams, cmd.Parent().Use) + if err := auth.DeleteOSFunc(&datamoldParams); err != nil { os.Exit(1) } }, @@ -69,119 +68,6 @@ func init() { migrationCmd.AddCommand(migrationOSCmd) deleteCmd.AddCommand(deleteOSCmd) - deleteOSCmd.Flags().StringVarP(&credentialPath, "credential-path", "C", "", "Json file path containing the user's credentials") + deleteOSCmd.Flags().StringVarP(&datamoldParams.CredentialPath, "credential-path", "C", "", "Json file path containing the user's credentials") deleteOSCmd.MarkFlagRequired("credential-path") } - -func importOSFunc() error { - var OSC *osc.OSController - var err error - logrus.Infof("User Information") - if !taskTarget { - OSC, err = getSrcOS() - } else { - OSC, err = getDstOS() - } - - if err != nil { - logrus.Errorf("OSController error importing into objectstorage : %v", err) - return err - } - - logrus.Info("Launch OSController MPut") - if err := OSC.MPut(dstPath); err != nil { - logrus.Error("MPut error importing into objectstorage") - return err - } - logrus.Infof("successfully imported : %s", dstPath) - return nil -} - -func exportOSFunc() error { - var OSC *osc.OSController - var err error - logrus.Infof("User Information") - if !taskTarget { - OSC, err = getSrcOS() - } else { - OSC, err = getDstOS() - } - if err != nil { - logrus.Errorf("OSController error exporting into objectstorage : %v", err) - return err - } - - logrus.Info("Launch OSController MGet") - if err := OSC.MGet(dstPath); err != nil { - logrus.Errorf("MGet error exporting into objectstorage : %v", err) - return err - } - logrus.Infof("successfully exported : %s", dstPath) - return nil -} - -func migrationOSFunc() error { - var src *osc.OSController - var srcErr error - var dst *osc.OSController - var dstErr error - if !taskTarget { - logrus.Infof("Source Information") - src, srcErr = getSrcOS() - if srcErr != nil { - logrus.Errorf("OSController error migration into objectstorage : %v", srcErr) - return srcErr - } - logrus.Infof("Target Information") - dst, dstErr = 
getDstOS() - if dstErr != nil { - logrus.Errorf("OSController error migration into objectstorage : %v", dstErr) - return dstErr - } - } else { - logrus.Infof("Source Information") - src, srcErr = getDstOS() - if srcErr != nil { - logrus.Errorf("OSController error migration into objectstorage : %v", srcErr) - return srcErr - } - logrus.Infof("Target Information") - dst, dstErr = getSrcOS() - if dstErr != nil { - logrus.Errorf("OSController error migration into objectstorage : %v", dstErr) - return dstErr - } - } - - logrus.Info("Launch OSController Copy") - if err := src.Copy(dst); err != nil { - logrus.Errorf("Copy error copying into objectstorage : %v", err) - return err - } - logrus.Info("successfully migrationed") - return nil -} - -func deleteOSFunc() error { - var OSC *osc.OSController - var err error - logrus.Infof("User Information") - if !taskTarget { - OSC, err = getSrcOS() - } else { - OSC, err = getDstOS() - } - if err != nil { - logrus.Errorf("OSController error deleting into objectstorage : %v", err) - return err - } - - logrus.Info("Launch OSController Delete") - if err := OSC.DeleteBucket(); err != nil { - logrus.Errorf("Delete error deleting into objectstorage : %v", err) - return err - } - logrus.Info("successfully deleted") - - return nil -} diff --git a/cmd/rdbms.go b/cmd/rdbms.go index f1f16e1..781e305 100644 --- a/cmd/rdbms.go +++ b/cmd/rdbms.go @@ -16,52 +16,48 @@ limitations under the License. package cmd import ( - "fmt" - "io/fs" "os" - "path/filepath" - "github.com/cloud-barista/cm-data-mold/service/rdbc" - "github.com/sirupsen/logrus" + "github.com/cloud-barista/cm-data-mold/internal/auth" "github.com/spf13/cobra" ) // rdbmsCmd represents the rdbms command var importRDBCmd = &cobra.Command{ - Use: "rdbms", - PreRun: preRun("rdbms"), + Use: "rdbms", Run: func(cmd *cobra.Command, args []string) { - if err := importRDMFunc(); err != nil { + auth.PreRun("rdbms", &datamoldParams, cmd.Parent().Use) + if err := auth.ImportRDMFunc(&datamoldParams); err != nil { os.Exit(1) } }, } var exportRDBCmd = &cobra.Command{ - Use: "rdbms", - PreRun: preRun("rdbms"), + Use: "rdbms", Run: func(cmd *cobra.Command, args []string) { - if err := exportRDMFunc(); err != nil { + auth.PreRun("rdbms", &datamoldParams, cmd.Parent().Use) + if err := auth.ExportRDMFunc(&datamoldParams); err != nil { os.Exit(1) } }, } var migrationRDBCmd = &cobra.Command{ - Use: "rdbms", - PreRun: preRun("rdbms"), + Use: "rdbms", Run: func(cmd *cobra.Command, args []string) { - if err := migrationRDMFunc(); err != nil { + auth.PreRun("rdbms", &datamoldParams, cmd.Parent().Use) + if err := auth.MigrationRDMFunc(&datamoldParams); err != nil { os.Exit(1) } }, } var deleteRDBMSCmd = &cobra.Command{ - Use: "rdbms", - PreRun: preRun("rdbms"), + Use: "rdbms", Run: func(cmd *cobra.Command, args []string) { - if err := deleteRDMFunc(); err != nil { + auth.PreRun("rdbms", &datamoldParams, cmd.Parent().Use) + if err := auth.DeleteRDMFunc(&datamoldParams); err != nil { os.Exit(1) } }, @@ -73,171 +69,7 @@ func init() { migrationCmd.AddCommand(migrationRDBCmd) deleteCmd.AddCommand(deleteRDBMSCmd) - deleteRDBMSCmd.Flags().StringVarP(&credentialPath, "credential-path", "C", "", "Json file path containing the user's credentials") - deleteRDBMSCmd.Flags().StringArrayVarP(&deleteDBList, "delete-db-list", "D", []string{}, "List of db names to delete") + deleteRDBMSCmd.Flags().StringVarP(&datamoldParams.CredentialPath, "credential-path", "C", "", "Json file path containing the user's credentials") + 
deleteRDBMSCmd.Flags().StringArrayVarP(&datamoldParams.DeleteDBList, "delete-db-list", "D", []string{}, "List of db names to delete") deleteRDBMSCmd.MarkFlagsRequiredTogether("credential-path", "delete-db-list") } - -func importRDMFunc() error { - var RDBC *rdbc.RDBController - var err error - logrus.Infof("User Information") - if !taskTarget { - RDBC, err = getSrcRDMS() - } else { - RDBC, err = getDstRDMS() - } - if err != nil { - logrus.Errorf("RDBController error importing into rdbms : %v", err) - return err - } - - sqlList := []string{} - err = filepath.Walk(dstPath, func(path string, info fs.FileInfo, err error) error { - if err != nil { - return err - } - if filepath.Ext(path) == ".sql" { - sqlList = append(sqlList, path) - } - return nil - }) - if err != nil { - logrus.Errorf("Walk error : %v", err) - return err - } - - for _, sqlPath := range sqlList { - data, err := os.ReadFile(sqlPath) - if err != nil { - logrus.Errorf("ReadFile error : %v", err) - return err - } - logrus.Infof("Import start: %s", sqlPath) - if err := RDBC.Put(string(data)); err != nil { - logrus.Error("Put error importing into rdbms") - return err - } - logrus.Infof("Import success: %s", sqlPath) - } - logrus.Infof("successfully imported : %s", dstPath) - return nil -} - -func exportRDMFunc() error { - var RDBC *rdbc.RDBController - var err error - logrus.Infof("User Information") - if !taskTarget { - RDBC, err = getSrcRDMS() - } else { - RDBC, err = getDstRDMS() - } - if err != nil { - logrus.Errorf("RDBController error exporting into rdbms : %v", err) - return err - } - - err = os.MkdirAll(dstPath, 0755) - if err != nil { - logrus.Errorf("MkdirAll error : %v", err) - return err - } - - dbList := []string{} - if err := RDBC.ListDB(&dbList); err != nil { - logrus.Errorf("ListDB error : %v", err) - return err - } - - var sqlData string - for _, db := range dbList { - sqlData = "" - logrus.Infof("Export start: %s", db) - if err := RDBC.Get(db, &sqlData); err != nil { - logrus.Errorf("Get error : %v", err) - return err - } - - file, err := os.Create(filepath.Join(dstPath, fmt.Sprintf("%s.sql", db))) - if err != nil { - logrus.Errorf("File create error : %v", err) - return err - } - defer file.Close() - - _, err = file.WriteString(sqlData) - if err != nil { - logrus.Errorf("File write error : %v", err) - return err - } - logrus.Infof("successfully exported : %s", file.Name()) - file.Close() - } - logrus.Infof("successfully exported : %s", dstPath) - return nil -} - -func migrationRDMFunc() error { - var srcRDBC *rdbc.RDBController - var srcErr error - var dstRDBC *rdbc.RDBController - var dstErr error - if !taskTarget { - logrus.Infof("Source Information") - srcRDBC, srcErr = getSrcRDMS() - if srcErr != nil { - logrus.Errorf("RDBController error migration into rdbms : %v", srcErr) - return srcErr - } - logrus.Infof("Target Information") - dstRDBC, dstErr = getDstRDMS() - if dstErr != nil { - logrus.Errorf("RDBController error migration into rdbms : %v", dstErr) - return dstErr - } - } else { - logrus.Infof("Source Information") - srcRDBC, srcErr = getDstRDMS() - if srcErr != nil { - logrus.Errorf("RDBController error migration into rdbms : %v", srcErr) - return srcErr - } - logrus.Infof("Target Information") - dstRDBC, dstErr = getSrcRDMS() - if dstErr != nil { - logrus.Errorf("RDBController error migration into rdbms : %v", dstErr) - return dstErr - } - } - - logrus.Info("Launch RDBController Copy") - if err := srcRDBC.Copy(dstRDBC); err != nil { - logrus.Errorf("Copy error copying into rdbms : %v", err) - return 
err - } - logrus.Info("successfully migrationed") - return nil -} - -func deleteRDMFunc() error { - var RDBC *rdbc.RDBController - var err error - if !taskTarget { - RDBC, err = getSrcRDMS() - } else { - RDBC, err = getDstRDMS() - } - if err != nil { - logrus.Errorf("RDBController error deleting into rdbms : %v", err) - return err - } - - logrus.Info("Launch RDBController Delete") - if err := RDBC.DeleteDB(deleteDBList...); err != nil { - logrus.Errorf("Delete error deleting into rdbms : %v", err) - return err - } - logrus.Info("successfully deleted") - return nil -} diff --git a/cmd/root.go b/cmd/root.go index 804dea8..8e7e3b5 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -16,76 +16,14 @@ limitations under the License. package cmd import ( - "io" "os" - "github.com/cloud-barista/cm-data-mold/internal/logformatter" - "github.com/sirupsen/logrus" - + "github.com/cloud-barista/cm-data-mold/internal/auth" + "github.com/cloud-barista/cm-data-mold/internal/log" "github.com/spf13/cobra" ) -var ( - // credential - credentialPath string - configData map[string]map[string]map[string]string - taskTarget bool - - //src - cSrcProvider string - cSrcAccessKey string - cSrcSecretKey string - cSrcRegion string - cSrcBucketName string - cSrcGcpCredPath string - cSrcProjectID string - cSrcEndpoint string - cSrcUsername string - cSrcPassword string - cSrcHost string - cSrcPort string - cSrcDBName string - - //dst - cDstProvider string - cDstAccessKey string - cDstSecretKey string - cDstRegion string - cDstBucketName string - cDstGcpCredPath string - cDstProjectID string - cDstEndpoint string - cDstUsername string - cDstPassword string - cDstHost string - cDstPort string - cDstDBName string - - // dummy - dstPath string - sqlSize int - csvSize int - jsonSize int - xmlSize int - txtSize int - pngSize int - gifSize int - zipSize int - - deleteDBList []string - deleteTableList []string -) - -func logFile() { - logFile, err := os.OpenFile("./datamold.log", os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) - if err != nil { - logrus.WithError(err).Fatal("Failed to create log file") - } - - logrus.SetLevel(logrus.DebugLevel) - logrus.SetFormatter(&logformatter.CustomTextFormatter{}) - logrus.SetOutput(io.MultiWriter(os.Stdout, logFile)) -} +var datamoldParams auth.DatamoldParams // rootCmd represents the base command when called without any subcommands var rootCmd = &cobra.Command{ @@ -94,7 +32,7 @@ var rootCmd = &cobra.Command{ Long: `It is a tool that builds an environment for verification of data migration technology and generates test data necessary for data migration.`, PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - logFile() + log.LogFile() return nil }, } diff --git a/cmd/server.go b/cmd/server.go index 6737e57..1d245ed 100644 --- a/cmd/server.go +++ b/cmd/server.go @@ -4,7 +4,7 @@ Copyright © 2023 NAME HERE package cmd import ( - "github.com/cloud-barista/cm-data-mold/internal/logformatter" + "github.com/cloud-barista/cm-data-mold/internal/log" dmsv "github.com/cloud-barista/cm-data-mold/websrc/serve" "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -18,7 +18,7 @@ var serverCmd = &cobra.Command{ Short: "Start Web Server", Long: `Start Web Server`, Run: func(cmd *cobra.Command, args []string) { - logrus.SetFormatter(&logformatter.CustomTextFormatter{CmdName: "server", JobName: "web server"}) + logrus.SetFormatter(&log.CustomTextFormatter{CmdName: "server", JobName: "web server"}) logrus.Info("Start Web Server") dmsv.Run(dmsv.InitServer(), listenPort) }, diff --git 
a/config/config.go b/config/config.go index 5eede49..8e70028 100644 --- a/config/config.go +++ b/config/config.go @@ -124,7 +124,7 @@ func NewDynamoDBClientWithEndpoint(accesskey, secretkey, region string, endpoint return dynamodb.NewFromConfig(*cfg), nil } -func NewGCSClient(credentialsFile string) (*storage.Client, error) { +func NewGCPClient(credentialsFile string) (*storage.Client, error) { client, err := storage.NewClient( context.TODO(), option.WithCredentialsFile(credentialsFile), diff --git a/internal/auth/base.go b/internal/auth/base.go new file mode 100644 index 0000000..1d9d4d4 --- /dev/null +++ b/internal/auth/base.go @@ -0,0 +1,735 @@ +package auth + +import ( + "database/sql" + "encoding/json" + "errors" + "fmt" + "os" + "strconv" + + "github.com/cloud-barista/cm-data-mold/config" + "github.com/cloud-barista/cm-data-mold/internal/log" + "github.com/cloud-barista/cm-data-mold/pkg/nrdbms/awsdnmdb" + "github.com/cloud-barista/cm-data-mold/pkg/nrdbms/gcpfsdb" + "github.com/cloud-barista/cm-data-mold/pkg/nrdbms/ncpmgdb" + "github.com/cloud-barista/cm-data-mold/pkg/objectstorage/gcpfs" + "github.com/cloud-barista/cm-data-mold/pkg/objectstorage/s3fs" + "github.com/cloud-barista/cm-data-mold/pkg/rdbms/mysql" + "github.com/cloud-barista/cm-data-mold/pkg/utils" + "github.com/cloud-barista/cm-data-mold/service/nrdbc" + "github.com/cloud-barista/cm-data-mold/service/osc" + "github.com/cloud-barista/cm-data-mold/service/rdbc" + _ "github.com/go-sql-driver/mysql" + "github.com/sirupsen/logrus" +) + +func PreRun(task string, datamoldParams *DatamoldParams, use string) { + logrus.SetFormatter(&log.CustomTextFormatter{CmdName: use, JobName: task}) + logrus.Infof("launch an %s to %s", use, task) + err := preRunE(use, task, datamoldParams) + if err != nil { + logrus.Errorf("Pre-check for %s operation errors : %v", task, err) + os.Exit(1) + } + logrus.Infof("successful pre-check %s into %s", use, task) +} + +func GetSrcOS(datamoldParams *DatamoldParams) (*osc.OSController, error) { + var OSC *osc.OSController + logrus.Infof("Provider : %s", datamoldParams.SrcProvider) + if datamoldParams.SrcProvider == "aws" { + logrus.Infof("AccessKey : %s", datamoldParams.SrcAccessKey) + logrus.Infof("SecretKey : %s", datamoldParams.SrcSecretKey) + logrus.Infof("Region : %s", datamoldParams.SrcRegion) + logrus.Infof("BucketName : %s", datamoldParams.SrcBucketName) + s3c, err := config.NewS3Client(datamoldParams.SrcAccessKey, datamoldParams.SrcSecretKey, datamoldParams.SrcRegion) + if err != nil { + return nil, fmt.Errorf("NewS3Client error : %v", err) + } + + OSC, err = osc.New(s3fs.New(utils.AWS, s3c, datamoldParams.SrcBucketName, datamoldParams.SrcRegion), osc.WithLogger(logrus.StandardLogger())) + if err != nil { + return nil, fmt.Errorf("osc error : %v", err) + } + } else if datamoldParams.SrcProvider == "gcp" { + logrus.Infof("CredentialsFilePath : %s", datamoldParams.SrcGcpCredPath) + logrus.Infof("ProjectID : %s", datamoldParams.SrcProjectID) + logrus.Infof("Region : %s", datamoldParams.SrcRegion) + logrus.Infof("BucketName : %s", datamoldParams.SrcBucketName) + gc, err := config.NewGCPClient(datamoldParams.SrcGcpCredPath) + if err != nil { + return nil, fmt.Errorf("NewGCPClient error : %v", err) + } + + OSC, err = osc.New(gcpfs.New(gc, datamoldParams.SrcProjectID, datamoldParams.SrcBucketName, datamoldParams.SrcRegion), osc.WithLogger(logrus.StandardLogger())) + if err != nil { + return nil, fmt.Errorf("osc error : %v", err) + } + } else if datamoldParams.SrcProvider == "ncp" { + 
logrus.Infof("AccessKey : %s", datamoldParams.SrcAccessKey) + logrus.Infof("SecretKey : %s", datamoldParams.SrcSecretKey) + logrus.Infof("Endpoint : %s", datamoldParams.SrcEndpoint) + logrus.Infof("Region : %s", datamoldParams.SrcRegion) + logrus.Infof("BucketName : %s", datamoldParams.SrcBucketName) + s3c, err := config.NewS3ClientWithEndpoint(datamoldParams.SrcAccessKey, datamoldParams.SrcSecretKey, datamoldParams.SrcRegion, datamoldParams.SrcEndpoint) + if err != nil { + return nil, fmt.Errorf("NewS3ClientWithEndpint error : %v", err) + } + + OSC, err = osc.New(s3fs.New(utils.AWS, s3c, datamoldParams.SrcBucketName, datamoldParams.SrcRegion), osc.WithLogger(logrus.StandardLogger())) + if err != nil { + return nil, fmt.Errorf("osc error : %v", err) + } + } + return OSC, nil +} + +func GetDstOS(datamoldParams *DatamoldParams) (*osc.OSController, error) { + var OSC *osc.OSController + logrus.Infof("Provider : %s", datamoldParams.DstProvider) + if datamoldParams.DstProvider == "aws" { + logrus.Infof("AccessKey : %s", datamoldParams.DstAccessKey) + logrus.Infof("SecretKey : %s", datamoldParams.DstSecretKey) + logrus.Infof("Region : %s", datamoldParams.DstRegion) + logrus.Infof("BucketName : %s", datamoldParams.DstBucketName) + s3c, err := config.NewS3Client(datamoldParams.DstAccessKey, datamoldParams.DstSecretKey, datamoldParams.DstRegion) + if err != nil { + return nil, fmt.Errorf("NewS3Client error : %v", err) + } + + OSC, err = osc.New(s3fs.New(utils.AWS, s3c, datamoldParams.DstBucketName, datamoldParams.DstRegion), osc.WithLogger(logrus.StandardLogger())) + if err != nil { + return nil, fmt.Errorf("osc error : %v", err) + } + } else if datamoldParams.DstProvider == "gcp" { + logrus.Infof("CredentialsFilePath : %s", datamoldParams.DstGcpCredPath) + logrus.Infof("ProjectID : %s", datamoldParams.DstProjectID) + logrus.Infof("Region : %s", datamoldParams.DstRegion) + logrus.Infof("BucketName : %s", datamoldParams.DstBucketName) + gc, err := config.NewGCPClient(datamoldParams.DstGcpCredPath) + if err != nil { + return nil, fmt.Errorf("NewGCPClient error : %v", err) + } + + OSC, err = osc.New(gcpfs.New(gc, datamoldParams.DstProjectID, datamoldParams.DstBucketName, datamoldParams.DstRegion), osc.WithLogger(logrus.StandardLogger())) + if err != nil { + return nil, fmt.Errorf("osc error : %v", err) + } + } else if datamoldParams.DstProvider == "ncp" { + logrus.Infof("AccessKey : %s", datamoldParams.DstAccessKey) + logrus.Infof("SecretKey : %s", datamoldParams.DstSecretKey) + logrus.Infof("Endpoint : %s", datamoldParams.DstEndpoint) + logrus.Infof("Region : %s", datamoldParams.DstRegion) + logrus.Infof("BucketName : %s", datamoldParams.DstBucketName) + s3c, err := config.NewS3ClientWithEndpoint(datamoldParams.DstAccessKey, datamoldParams.DstSecretKey, datamoldParams.DstRegion, datamoldParams.DstEndpoint) + if err != nil { + return nil, fmt.Errorf("NewS3ClientWithEndpint error : %v", err) + } + + OSC, err = osc.New(s3fs.New(utils.AWS, s3c, datamoldParams.DstBucketName, datamoldParams.DstRegion), osc.WithLogger(logrus.StandardLogger())) + if err != nil { + return nil, fmt.Errorf("osc error : %v", err) + } + } + return OSC, nil +} + +func GetSrcRDMS(datamoldParams *DatamoldParams) (*rdbc.RDBController, error) { + logrus.Infof("Provider : %s", datamoldParams.SrcProvider) + logrus.Infof("Username : %s", datamoldParams.SrcUsername) + logrus.Infof("Password : %s", datamoldParams.SrcPassword) + logrus.Infof("Host : %s", datamoldParams.SrcHost) + logrus.Infof("Port : %s", datamoldParams.SrcPort) + src, err 
:= sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:%s)/", datamoldParams.SrcUsername, datamoldParams.SrcPassword, datamoldParams.SrcHost, datamoldParams.SrcPort)) + if err != nil { + return nil, err + } + return rdbc.New(mysql.New(utils.Provider(datamoldParams.SrcProvider), src), rdbc.WithLogger(logrus.StandardLogger())) +} + +func GetDstRDMS(datamoldParams *DatamoldParams) (*rdbc.RDBController, error) { + logrus.Infof("Provider : %s", datamoldParams.DstProvider) + logrus.Infof("Username : %s", datamoldParams.DstUsername) + logrus.Infof("Password : %s", datamoldParams.DstPassword) + logrus.Infof("Host : %s", datamoldParams.DstHost) + logrus.Infof("Port : %s", datamoldParams.DstPort) + dst, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:%s)/", datamoldParams.DstUsername, datamoldParams.DstPassword, datamoldParams.DstHost, datamoldParams.DstPort)) + if err != nil { + return nil, err + } + return rdbc.New(mysql.New(utils.Provider(datamoldParams.DstProvider), dst), rdbc.WithLogger(logrus.StandardLogger())) +} + +func GetSrcNRDMS(datamoldParams *DatamoldParams) (*nrdbc.NRDBController, error) { + var NRDBC *nrdbc.NRDBController + logrus.Infof("Provider : %s", datamoldParams.SrcProvider) + if datamoldParams.SrcProvider == "aws" { + logrus.Infof("AccessKey : %s", datamoldParams.SrcAccessKey) + logrus.Infof("SecretKey : %s", datamoldParams.SrcSecretKey) + logrus.Infof("Region : %s", datamoldParams.SrcRegion) + awsnrdb, err := config.NewDynamoDBClient(datamoldParams.SrcAccessKey, datamoldParams.SrcSecretKey, datamoldParams.SrcRegion) + if err != nil { + return nil, err + } + + NRDBC, err = nrdbc.New(awsdnmdb.New(awsnrdb, datamoldParams.SrcRegion), nrdbc.WithLogger(logrus.StandardLogger())) + if err != nil { + return nil, err + } + } else if datamoldParams.SrcProvider == "gcp" { + logrus.Infof("CredentialsFilePath : %s", datamoldParams.SrcGcpCredPath) + logrus.Infof("ProjectID : %s", datamoldParams.SrcProjectID) + logrus.Infof("Region : %s", datamoldParams.SrcRegion) + gcpnrdb, err := config.NewFireStoreClient(datamoldParams.SrcGcpCredPath, datamoldParams.SrcProjectID) + if err != nil { + return nil, err + } + + NRDBC, err = nrdbc.New(gcpfsdb.New(gcpnrdb, datamoldParams.SrcRegion), nrdbc.WithLogger(logrus.StandardLogger())) + if err != nil { + return nil, err + } + } else if datamoldParams.SrcProvider == "ncp" { + logrus.Infof("Username : %s", datamoldParams.SrcUsername) + logrus.Infof("Password : %s", datamoldParams.SrcPassword) + logrus.Infof("Host : %s", datamoldParams.SrcHost) + logrus.Infof("Port : %s", datamoldParams.SrcPort) + port, err := strconv.Atoi(datamoldParams.SrcPort) + if err != nil { + return nil, err + } + + ncpnrdb, err := config.NewNCPMongoDBClient(datamoldParams.SrcUsername, datamoldParams.SrcPassword, datamoldParams.SrcHost, port) + if err != nil { + return nil, err + } + + NRDBC, err = nrdbc.New(ncpmgdb.New(ncpnrdb, datamoldParams.SrcDBName), nrdbc.WithLogger(logrus.StandardLogger())) + if err != nil { + return nil, err + } + } + return NRDBC, nil +} + +func GetDstNRDMS(datamoldParams *DatamoldParams) (*nrdbc.NRDBController, error) { + var NRDBC *nrdbc.NRDBController + logrus.Infof("Provider : %s", datamoldParams.DstProvider) + if datamoldParams.DstProvider == "aws" { + logrus.Infof("AccessKey : %s", datamoldParams.DstAccessKey) + logrus.Infof("SecretKey : %s", datamoldParams.DstSecretKey) + logrus.Infof("Region : %s", datamoldParams.DstRegion) + awsnrdb, err := config.NewDynamoDBClient(datamoldParams.DstAccessKey, datamoldParams.DstSecretKey, datamoldParams.DstRegion) + if err 
!= nil { + return nil, err + } + + NRDBC, err = nrdbc.New(awsdnmdb.New(awsnrdb, datamoldParams.DstRegion), nrdbc.WithLogger(logrus.StandardLogger())) + if err != nil { + return nil, err + } + } else if datamoldParams.DstProvider == "gcp" { + logrus.Infof("CredentialsFilePath : %s", datamoldParams.DstGcpCredPath) + logrus.Infof("ProjectID : %s", datamoldParams.DstProjectID) + logrus.Infof("Region : %s", datamoldParams.DstRegion) + gcpnrdb, err := config.NewFireStoreClient(datamoldParams.DstGcpCredPath, datamoldParams.DstProjectID) + if err != nil { + return nil, err + } + + NRDBC, err = nrdbc.New(gcpfsdb.New(gcpnrdb, datamoldParams.DstRegion), nrdbc.WithLogger(logrus.StandardLogger())) + if err != nil { + return nil, err + } + } else if datamoldParams.DstProvider == "ncp" { + logrus.Infof("Username : %s", datamoldParams.DstUsername) + logrus.Infof("Password : %s", datamoldParams.DstPassword) + logrus.Infof("Host : %s", datamoldParams.DstHost) + logrus.Infof("Port : %s", datamoldParams.DstPort) + port, err := strconv.Atoi(datamoldParams.DstPort) + if err != nil { + return nil, err + } + + ncpnrdb, err := config.NewNCPMongoDBClient(datamoldParams.DstUsername, datamoldParams.DstPassword, datamoldParams.DstHost, port) + if err != nil { + return nil, err + } + + NRDBC, err = nrdbc.New(ncpmgdb.New(ncpnrdb, datamoldParams.DstDBName), nrdbc.WithLogger(logrus.StandardLogger())) + if err != nil { + return nil, err + } + } + return NRDBC, nil +} + +func GetConfig(credPath string, ConfigData *map[string]map[string]map[string]string) error { + data, err := os.ReadFile(credPath) + if err != nil { + return err + } + + err = json.Unmarshal(data, ConfigData) + if err != nil { + return err + } + return nil +} + +func preRunE(pName string, cmdName string, datamoldParams *DatamoldParams) error { + logrus.Info("initiate a configuration scan") + if err := GetConfig(datamoldParams.CredentialPath, &datamoldParams.ConfigData); err != nil { + return fmt.Errorf("get config error : %s", err) + } + + if cmdName == "objectstorage" { + if value, ok := datamoldParams.ConfigData["objectstorage"]; ok { + if !datamoldParams.TaskTarget { + if src, ok := value["src"]; ok { + if err := applyOSValue(src, "src", datamoldParams); err != nil { + return err + } + } + } else { + if dst, ok := value["dst"]; ok { + if err := applyOSValue(dst, "dst", datamoldParams); err != nil { + return err + } + } + } + } else { + return errors.New("does not exist objectstorage") + } + + if pName != "migration" && pName != "delete" { + if err := utils.IsDir(datamoldParams.DstPath); err != nil { + return errors.New("dstPath error") + } + } else if pName == "migration" { + if value, ok := datamoldParams.ConfigData["objectstorage"]; ok { + if !datamoldParams.TaskTarget { + if dst, ok := value["dst"]; ok { + if err := applyOSValue(dst, "dst", datamoldParams); err != nil { + return err + } + } + } else { + if src, ok := value["src"]; ok { + if err := applyOSValue(src, "src", datamoldParams); err != nil { + return err + } + } + } + } else { + return errors.New("does not exist objectstorage dst") + } + } + } else if cmdName == "rdbms" { + if value, ok := datamoldParams.ConfigData["rdbms"]; ok { + if !datamoldParams.TaskTarget { + if src, ok := value["src"]; ok { + if err := applyRDMValue(src, "src", datamoldParams); err != nil { + return err + } + } + } else { + if value, ok := datamoldParams.ConfigData["rdbms"]; ok { + if dst, ok := value["dst"]; ok { + return applyRDMValue(dst, "dst", datamoldParams) + } + } + } + } else { + return errors.New("does not 
exist rdbms src") + } + + if pName != "migration" && pName != "delete" { + if err := utils.IsDir(datamoldParams.DstPath); err != nil { + return errors.New("dstPath error") + } + } else if pName == "migration" { + if value, ok := datamoldParams.ConfigData["rdbms"]; ok { + if !datamoldParams.TaskTarget { + if value, ok := datamoldParams.ConfigData["rdbms"]; ok { + if dst, ok := value["dst"]; ok { + return applyRDMValue(dst, "dst", datamoldParams) + } + } + } else { + if src, ok := value["src"]; ok { + if err := applyRDMValue(src, "src", datamoldParams); err != nil { + return err + } + } + } + } else { + return errors.New("does not exist rdbms dst") + } + } + } else if cmdName == "nrdbms" { + if value, ok := datamoldParams.ConfigData["nrdbms"]; ok { + if !datamoldParams.TaskTarget { + if src, ok := value["src"]; ok { + if err := applyNRDMValue(src, "src", datamoldParams); err != nil { + return err + } + } + } else { + if dst, ok := value["dst"]; ok { + if err := applyNRDMValue(dst, "dst", datamoldParams); err != nil { + return err + } + } + } + } else { + return errors.New("does not exist nrdbms src") + } + + if pName != "migration" && pName != "delete" { + if err := utils.IsDir(datamoldParams.DstPath); err != nil { + return errors.New("dstPath error") + } + } else if pName == "migration" { + if value, ok := datamoldParams.ConfigData["nrdbms"]; ok { + if !datamoldParams.TaskTarget { + if value, ok := datamoldParams.ConfigData["nrdbms"]; ok { + if dst, ok := value["dst"]; ok { + return applyNRDMValue(dst, "dst", datamoldParams) + } + } + } else { + if src, ok := value["src"]; ok { + if err := applyNRDMValue(src, "src", datamoldParams); err != nil { + return err + } + } + } + } else { + return errors.New("does not exist nrdbms dst") + } + } + } + return nil +} + +func applyNRDMValue(src map[string]string, p string, datamoldParams *DatamoldParams) error { + provider, ok := src["provider"] + if ok { + if provider != "aws" && provider != "gcp" && provider != "ncp" { + return fmt.Errorf("provider[aws,gcp,ncp] error : %s", provider) + } + } else { + return errors.New("does not exist provider") + } + + if p == "src" { + datamoldParams.SrcProvider = provider + } else { + datamoldParams.DstProvider = provider + } + + if provider == "aws" { + access, ok := src["assessKey"] + if !ok { + return errors.New("does not exist assessKey") + } + + if p == "src" { + datamoldParams.SrcAccessKey = access + } else { + datamoldParams.DstAccessKey = access + } + + secret, ok := src["secretKey"] + if !ok { + return errors.New("does not exist secretKey") + } + + if p == "src" { + datamoldParams.SrcSecretKey = secret + } else { + datamoldParams.DstSecretKey = secret + } + + region, ok := src["region"] + if !ok { + return errors.New("does not exist region") + } + + if p == "src" { + datamoldParams.SrcRegion = region + } else { + datamoldParams.DstRegion = region + } + } else if provider == "gcp" { + cred, ok := src["gcpCredPath"] + if !ok { + return errors.New("does not exist gcpCredPath") + } + if p == "src" { + datamoldParams.SrcGcpCredPath = cred + } else { + datamoldParams.DstGcpCredPath = cred + } + + projectID, ok := src["projectID"] + if !ok { + return errors.New("does not exist projectID") + } + if p == "src" { + datamoldParams.SrcProjectID = projectID + } else { + datamoldParams.DstProjectID = projectID + } + + region, ok := src["region"] + if !ok { + return errors.New("does not exist region") + } + + if p == "src" { + datamoldParams.SrcRegion = region + } else { + datamoldParams.DstRegion = region + } + } else 
if provider == "ncp" { + username, ok := src["username"] + if !ok { + return errors.New("does not exist username") + } + + if p == "src" { + datamoldParams.SrcUsername = username + } else { + datamoldParams.DstUsername = username + } + + password, ok := src["password"] + if !ok { + return errors.New("does not exist password") + } + + if p == "src" { + datamoldParams.SrcPassword = password + } else { + datamoldParams.DstPassword = password + } + + host, ok := src["host"] + if !ok { + return errors.New("does not exist host") + } + + if p == "src" { + datamoldParams.SrcHost = host + } else { + datamoldParams.DstHost = host + } + + port, ok := src["port"] + if !ok { + return errors.New("does not exist port") + } + + if p == "src" { + datamoldParams.SrcPort = port + } else { + datamoldParams.DstPort = port + } + + DBName, ok := src["databaseName"] + if !ok { + return errors.New("does not exist databaseName") + } + + if p == "src" { + datamoldParams.SrcDBName = DBName + } else { + datamoldParams.DstDBName = DBName + } + } + return nil +} + +func applyRDMValue(src map[string]string, p string, datamoldParams *DatamoldParams) error { + provider, ok := src["provider"] + if ok { + if provider != "aws" && provider != "gcp" && provider != "ncp" { + return fmt.Errorf("provider[aws,gcp,ncp] error : %s", provider) + } + } else { + return errors.New("does not exist provider") + } + + if p == "src" { + datamoldParams.SrcProvider = provider + } else { + datamoldParams.DstProvider = provider + } + + username, ok := src["username"] + if !ok { + return errors.New("does not exist username") + } + + if p == "src" { + datamoldParams.SrcUsername = username + } else { + datamoldParams.DstUsername = username + } + + password, ok := src["password"] + if !ok { + return errors.New("does not exist password") + } + + if p == "src" { + datamoldParams.SrcPassword = password + } else { + datamoldParams.DstPassword = password + } + + host, ok := src["host"] + if !ok { + return errors.New("does not exist host") + } + + if p == "src" { + datamoldParams.SrcHost = host + } else { + datamoldParams.DstHost = host + } + + port, ok := src["port"] + if !ok { + return errors.New("does not exist port") + } + + if p == "src" { + datamoldParams.SrcPort = port + } else { + datamoldParams.DstPort = port + } + + return nil +} + +func applyOSValue(src map[string]string, p string, datamoldParams *DatamoldParams) error { + provider, ok := src["provider"] + if ok { + if provider != "aws" && provider != "gcp" && provider != "ncp" { + return fmt.Errorf("provider[aws,gcp,ncp] error : %s", provider) + } + } else { + return errors.New("does not exist provider") + } + + if p == "src" { + datamoldParams.SrcProvider = provider + } else { + datamoldParams.DstProvider = provider + } + + if provider == "aws" || provider == "ncp" { + access, ok := src["assessKey"] + if !ok { + return errors.New("does not exist assessKey") + } + + if p == "src" { + datamoldParams.SrcAccessKey = access + } else { + datamoldParams.DstAccessKey = access + } + + secret, ok := src["secretKey"] + if !ok { + return errors.New("does not exist secretKey") + } + + if p == "src" { + datamoldParams.SrcSecretKey = secret + } else { + datamoldParams.DstSecretKey = secret + } + + region, ok := src["region"] + if !ok { + return errors.New("does not exist region") + } + + if p == "src" { + datamoldParams.SrcRegion = region + } else { + datamoldParams.DstRegion = region + } + + bktName, ok := src["bucketName"] + if !ok { + return errors.New("does not exist bucketName") + } + + if p == "src" { 
+ datamoldParams.SrcBucketName = bktName + } else { + datamoldParams.DstBucketName = bktName + } + + if provider == "ncp" { + endpoint, ok := src["endpoint"] + if !ok { + return errors.New("does not exist endpoint") + } + if p == "src" { + datamoldParams.SrcEndpoint = endpoint + } else { + datamoldParams.DstEndpoint = endpoint + } + } + } + + if provider == "gcp" { + cred, ok := src["gcpCredPath"] + if !ok { + return errors.New("does not exist gcpCredPath") + } + if p == "src" { + datamoldParams.SrcGcpCredPath = cred + } else { + datamoldParams.DstGcpCredPath = cred + } + + projectID, ok := src["projectID"] + if !ok { + return errors.New("does not exist projectID") + } + if p == "src" { + datamoldParams.SrcProjectID = projectID + } else { + datamoldParams.DstProjectID = projectID + } + + region, ok := src["region"] + if !ok { + return errors.New("does not exist region") + } + if p == "src" { + datamoldParams.SrcRegion = region + } else { + datamoldParams.DstRegion = region + } + + bktName, ok := src["bucketName"] + if !ok { + return errors.New("does not exist bucketName") + } + if p == "src" { + datamoldParams.SrcBucketName = bktName + } else { + datamoldParams.DstBucketName = bktName + } + } + return nil +} diff --git a/internal/auth/cmdstruct.go b/internal/auth/cmdstruct.go new file mode 100644 index 0000000..af5c097 --- /dev/null +++ b/internal/auth/cmdstruct.go @@ -0,0 +1,52 @@ +package auth + +type DatamoldParams struct { + // credential + CredentialPath string + ConfigData map[string]map[string]map[string]string + TaskTarget bool + + //src + SrcProvider string + SrcAccessKey string + SrcSecretKey string + SrcRegion string + SrcBucketName string + SrcGcpCredPath string + SrcProjectID string + SrcEndpoint string + SrcUsername string + SrcPassword string + SrcHost string + SrcPort string + SrcDBName string + + //dst + DstProvider string + DstAccessKey string + DstSecretKey string + DstRegion string + DstBucketName string + DstGcpCredPath string + DstProjectID string + DstEndpoint string + DstUsername string + DstPassword string + DstHost string + DstPort string + DstDBName string + + // dummy + DstPath string + SqlSize int + CsvSize int + JsonSize int + XmlSize int + TxtSize int + PngSize int + GifSize int + ZipSize int + + DeleteDBList []string + DeleteTableList []string +} diff --git a/internal/auth/nrdb.go b/internal/auth/nrdb.go new file mode 100644 index 0000000..0f1d894 --- /dev/null +++ b/internal/auth/nrdb.go @@ -0,0 +1,183 @@ +package auth + +import ( + "encoding/json" + "fmt" + "io/fs" + "os" + "path/filepath" + + "github.com/cloud-barista/cm-data-mold/service/nrdbc" + "github.com/sirupsen/logrus" +) + +func ImportNRDMFunc(datamoldParams *DatamoldParams) error { + var NRDBC *nrdbc.NRDBController + var err error + if !datamoldParams.TaskTarget { + NRDBC, err = GetSrcNRDMS(datamoldParams) + } else { + NRDBC, err = GetDstNRDMS(datamoldParams) + } + + if err != nil { + logrus.Errorf("NRDBController error importing into nrdbms : %v", err) + return err + } + + jsonList := []string{} + err = filepath.Walk(datamoldParams.DstPath, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return err + } + if filepath.Ext(path) == ".json" { + jsonList = append(jsonList, path) + } + return nil + }) + + if err != nil { + logrus.Errorf("Walk error : %v", err) + return err + } + + var srcData []map[string]interface{} + for _, jsonFile := range jsonList { + srcData = []map[string]interface{}{} + + file, err := os.Open(jsonFile) + if err != nil { + logrus.Errorf("file open 
error : %v", err) + return err + } + defer file.Close() + + if err := json.NewDecoder(file).Decode(&srcData); err != nil { + logrus.Errorf("file decoding error : %v", err) + return err + } + + fileName := filepath.Base(jsonFile) + tableName := fileName[:len(fileName)-len(filepath.Ext(fileName))] + + logrus.Infof("Import start: %s", fileName) + if err := NRDBC.Put(tableName, &srcData); err != nil { + logrus.Error("Put error importing into nrdbms") + return err + } + logrus.Infof("successfully imported : %s", datamoldParams.DstPath) + } + return nil +} + +func ExportNRDMFunc(datamoldParams *DatamoldParams) error { + var NRDBC *nrdbc.NRDBController + var err error + logrus.Infof("User Information") + if !datamoldParams.TaskTarget { + NRDBC, err = GetSrcNRDMS(datamoldParams) + } else { + NRDBC, err = GetDstNRDMS(datamoldParams) + } + if err != nil { + logrus.Errorf("NRDBController error exporting into rdbms : %v", err) + return err + } + + tableList, err := NRDBC.ListTables() + if err != nil { + logrus.Errorf("ListTables error : %v", err) + return err + } + + var dstData []map[string]interface{} + for _, table := range tableList { + logrus.Infof("Export start: %s", table) + dstData = []map[string]interface{}{} + + if err := NRDBC.Get(table, &dstData); err != nil { + logrus.Errorf("Get error : %v", err) + return err + } + + file, err := os.Create(filepath.Join(datamoldParams.DstPath, fmt.Sprintf("%s.json", table))) + if err != nil { + logrus.Errorf("File create error : %v", err) + return err + } + defer file.Close() + + encoder := json.NewEncoder(file) + encoder.SetIndent("", " ") + if err := encoder.Encode(dstData); err != nil { + logrus.Errorf("data encoding error : %v", err) + return err + } + logrus.Infof("successfully exported : %s", file.Name()) + } + logrus.Infof("successfully exported : %s", datamoldParams.DstPath) + return nil +} + +func MigrationNRDMFunc(datamoldParams *DatamoldParams) error { + var srcNRDBC *nrdbc.NRDBController + var srcErr error + var dstNRDBC *nrdbc.NRDBController + var dstErr error + if !datamoldParams.TaskTarget { + logrus.Infof("Source Information") + srcNRDBC, srcErr = GetSrcNRDMS(datamoldParams) + if srcErr != nil { + logrus.Errorf("NRDBController error migration into nrdbms : %v", srcErr) + return srcErr + } + logrus.Infof("Target Information") + dstNRDBC, dstErr = GetDstNRDMS(datamoldParams) + if dstErr != nil { + logrus.Errorf("NRDBController error migration into nrdbms : %v", dstErr) + return dstErr + } + } else { + logrus.Infof("Source Information") + srcNRDBC, srcErr = GetDstNRDMS(datamoldParams) + if srcErr != nil { + logrus.Errorf("NRDBController error migration into nrdbms : %v", srcErr) + return srcErr + } + logrus.Infof("Target Information") + dstNRDBC, dstErr = GetSrcNRDMS(datamoldParams) + if dstErr != nil { + logrus.Errorf("NRDBController error migration into nrdbms : %v", dstErr) + return dstErr + } + } + logrus.Info("Launch NRDBController Copy") + if err := srcNRDBC.Copy(dstNRDBC); err != nil { + logrus.Errorf("Copy error copying into nrdbms : %v", err) + return err + } + logrus.Info("successfully migrationed") + return nil +} + +func DeleteNRDMFunc(datamoldParams *DatamoldParams) error { + var NRDBC *nrdbc.NRDBController + var err error + if !datamoldParams.TaskTarget { + NRDBC, err = GetSrcNRDMS(datamoldParams) + } else { + NRDBC, err = GetDstNRDMS(datamoldParams) + } + if err != nil { + logrus.Errorf("NRDBController error deleting into nrdbms : %v", err) + return err + } + + logrus.Info("Launch NRDBController Delete") + if err := 
NRDBC.DeleteTables(datamoldParams.DeleteTableList...); err != nil { + logrus.Errorf("Delete error deleting into nrdbms : %v", err) + return err + } + logrus.Info("successfully deleted") + return nil +} diff --git a/internal/auth/os.go b/internal/auth/os.go new file mode 100644 index 0000000..7edf4e6 --- /dev/null +++ b/internal/auth/os.go @@ -0,0 +1,119 @@ +package auth + +import ( + "github.com/cloud-barista/cm-data-mold/service/osc" + "github.com/sirupsen/logrus" +) + +func ImportOSFunc(datamoldParams *DatamoldParams) error { + var OSC *osc.OSController + var err error + logrus.Infof("User Information") + if !datamoldParams.TaskTarget { + OSC, err = GetSrcOS(datamoldParams) + } else { + OSC, err = GetDstOS(datamoldParams) + } + + if err != nil { + logrus.Errorf("OSController error importing into objectstorage : %v", err) + return err + } + + logrus.Info("Launch OSController MPut") + if err := OSC.MPut(datamoldParams.DstPath); err != nil { + logrus.Error("MPut error importing into objectstorage") + return err + } + logrus.Infof("successfully imported : %s", datamoldParams.DstPath) + return nil +} + +func ExportOSFunc(datamoldParams *DatamoldParams) error { + var OSC *osc.OSController + var err error + logrus.Infof("User Information") + if !datamoldParams.TaskTarget { + OSC, err = GetSrcOS(datamoldParams) + } else { + OSC, err = GetDstOS(datamoldParams) + } + if err != nil { + logrus.Errorf("OSController error exporting into objectstorage : %v", err) + return err + } + + logrus.Info("Launch OSController MGet") + if err := OSC.MGet(datamoldParams.DstPath); err != nil { + logrus.Errorf("MGet error exporting into objectstorage : %v", err) + return err + } + logrus.Infof("successfully exported : %s", datamoldParams.DstPath) + return nil +} + +func MigrationOSFunc(datamoldParams *DatamoldParams) error { + var src *osc.OSController + var srcErr error + var dst *osc.OSController + var dstErr error + if !datamoldParams.TaskTarget { + logrus.Infof("Source Information") + src, srcErr = GetSrcOS(datamoldParams) + if srcErr != nil { + logrus.Errorf("OSController error migration into objectstorage : %v", srcErr) + return srcErr + } + logrus.Infof("Target Information") + dst, dstErr = GetDstOS(datamoldParams) + if dstErr != nil { + logrus.Errorf("OSController error migration into objectstorage : %v", dstErr) + return dstErr + } + } else { + logrus.Infof("Source Information") + src, srcErr = GetDstOS(datamoldParams) + if srcErr != nil { + logrus.Errorf("OSController error migration into objectstorage : %v", srcErr) + return srcErr + } + logrus.Infof("Target Information") + dst, dstErr = GetSrcOS(datamoldParams) + if dstErr != nil { + logrus.Errorf("OSController error migration into objectstorage : %v", dstErr) + return dstErr + } + } + + logrus.Info("Launch OSController Copy") + if err := src.Copy(dst); err != nil { + logrus.Errorf("Copy error copying into objectstorage : %v", err) + return err + } + logrus.Info("successfully migrationed") + return nil +} + +func DeleteOSFunc(datamoldParams *DatamoldParams) error { + var OSC *osc.OSController + var err error + logrus.Infof("User Information") + if !datamoldParams.TaskTarget { + OSC, err = GetSrcOS(datamoldParams) + } else { + OSC, err = GetDstOS(datamoldParams) + } + if err != nil { + logrus.Errorf("OSController error deleting into objectstorage : %v", err) + return err + } + + logrus.Info("Launch OSController Delete") + if err := OSC.DeleteBucket(); err != nil { + logrus.Errorf("Delete error deleting into objectstorage : %v", err) + return err + } + 
logrus.Info("successfully deleted") + + return nil +} diff --git a/internal/auth/rdb.go b/internal/auth/rdb.go new file mode 100644 index 0000000..ab2105c --- /dev/null +++ b/internal/auth/rdb.go @@ -0,0 +1,175 @@ +package auth + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + + "github.com/cloud-barista/cm-data-mold/service/rdbc" + "github.com/sirupsen/logrus" +) + +func ImportRDMFunc(datamoldParams *DatamoldParams) error { + var RDBC *rdbc.RDBController + var err error + logrus.Infof("User Information") + if !datamoldParams.TaskTarget { + RDBC, err = GetSrcRDMS(datamoldParams) + } else { + RDBC, err = GetDstRDMS(datamoldParams) + } + if err != nil { + logrus.Errorf("RDBController error importing into rdbms : %v", err) + return err + } + + sqlList := []string{} + err = filepath.Walk(datamoldParams.DstPath, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return err + } + if filepath.Ext(path) == ".sql" { + sqlList = append(sqlList, path) + } + return nil + }) + if err != nil { + logrus.Errorf("Walk error : %v", err) + return err + } + + for _, sqlPath := range sqlList { + data, err := os.ReadFile(sqlPath) + if err != nil { + logrus.Errorf("ReadFile error : %v", err) + return err + } + logrus.Infof("Import start: %s", sqlPath) + if err := RDBC.Put(string(data)); err != nil { + logrus.Error("Put error importing into rdbms") + return err + } + logrus.Infof("Import success: %s", sqlPath) + } + logrus.Infof("successfully imported : %s", datamoldParams.DstPath) + return nil +} + +func ExportRDMFunc(datamoldParams *DatamoldParams) error { + var RDBC *rdbc.RDBController + var err error + logrus.Infof("User Information") + if !datamoldParams.TaskTarget { + RDBC, err = GetSrcRDMS(datamoldParams) + } else { + RDBC, err = GetDstRDMS(datamoldParams) + } + if err != nil { + logrus.Errorf("RDBController error exporting into rdbms : %v", err) + return err + } + + err = os.MkdirAll(datamoldParams.DstPath, 0755) + if err != nil { + logrus.Errorf("MkdirAll error : %v", err) + return err + } + + dbList := []string{} + if err := RDBC.ListDB(&dbList); err != nil { + logrus.Errorf("ListDB error : %v", err) + return err + } + + var sqlData string + for _, db := range dbList { + sqlData = "" + logrus.Infof("Export start: %s", db) + if err := RDBC.Get(db, &sqlData); err != nil { + logrus.Errorf("Get error : %v", err) + return err + } + + file, err := os.Create(filepath.Join(datamoldParams.DstPath, fmt.Sprintf("%s.sql", db))) + if err != nil { + logrus.Errorf("File create error : %v", err) + return err + } + defer file.Close() + + _, err = file.WriteString(sqlData) + if err != nil { + logrus.Errorf("File write error : %v", err) + return err + } + logrus.Infof("successfully exported : %s", file.Name()) + file.Close() + } + logrus.Infof("successfully exported : %s", datamoldParams.DstPath) + return nil +} + +func MigrationRDMFunc(datamoldParams *DatamoldParams) error { + var srcRDBC *rdbc.RDBController + var srcErr error + var dstRDBC *rdbc.RDBController + var dstErr error + if !datamoldParams.TaskTarget { + logrus.Infof("Source Information") + srcRDBC, srcErr = GetSrcRDMS(datamoldParams) + if srcErr != nil { + logrus.Errorf("RDBController error migration into rdbms : %v", srcErr) + return srcErr + } + logrus.Infof("Target Information") + dstRDBC, dstErr = GetDstRDMS(datamoldParams) + if dstErr != nil { + logrus.Errorf("RDBController error migration into rdbms : %v", dstErr) + return dstErr + } + } else { + logrus.Infof("Source Information") + srcRDBC, srcErr = 
GetDstRDMS(datamoldParams) + if srcErr != nil { + logrus.Errorf("RDBController error migration into rdbms : %v", srcErr) + return srcErr + } + logrus.Infof("Target Information") + dstRDBC, dstErr = GetSrcRDMS(datamoldParams) + if dstErr != nil { + logrus.Errorf("RDBController error migration into rdbms : %v", dstErr) + return dstErr + } + } + + logrus.Info("Launch RDBController Copy") + if err := srcRDBC.Copy(dstRDBC); err != nil { + logrus.Errorf("Copy error copying into rdbms : %v", err) + return err + } + logrus.Info("successfully migrationed") + return nil +} + +func DeleteRDMFunc(datamoldParams *DatamoldParams) error { + var RDBC *rdbc.RDBController + var err error + if !datamoldParams.TaskTarget { + RDBC, err = GetSrcRDMS(datamoldParams) + } else { + RDBC, err = GetDstRDMS(datamoldParams) + } + if err != nil { + logrus.Errorf("RDBController error deleting into rdbms : %v", err) + return err + } + + logrus.Info("Launch RDBController Delete") + if err := RDBC.DeleteDB(datamoldParams.DeleteDBList...); err != nil { + logrus.Errorf("Delete error deleting into rdbms : %v", err) + return err + } + logrus.Info("successfully deleted") + return nil +} diff --git a/internal/execfunc/dummycreate.go b/internal/execfunc/dummycreate.go new file mode 100644 index 0000000..b8efffd --- /dev/null +++ b/internal/execfunc/dummycreate.go @@ -0,0 +1,85 @@ +package execfunc + +import ( + "github.com/cloud-barista/cm-data-mold/internal/auth" + "github.com/cloud-barista/cm-data-mold/pkg/dummy/semistructured" + "github.com/cloud-barista/cm-data-mold/pkg/dummy/structured" + "github.com/cloud-barista/cm-data-mold/pkg/dummy/unstructured" + "github.com/sirupsen/logrus" +) + +func DummyCreate(datamoldParams auth.DatamoldParams) error { + logrus.Info("check directory paths") + if datamoldParams.SqlSize != 0 { + logrus.Info("start sql generation") + if err := structured.GenerateRandomSQL(datamoldParams.DstPath, datamoldParams.SqlSize); err != nil { + logrus.Error("failed to generate sql") + return err + } + logrus.Infof("successfully generated sql : %s", datamoldParams.DstPath) + } + + if datamoldParams.CsvSize != 0 { + logrus.Info("start csv generation") + if err := structured.GenerateRandomCSV(datamoldParams.DstPath, datamoldParams.CsvSize); err != nil { + logrus.Error("failed to generate csv") + return err + } + logrus.Infof("successfully generated csv : %s", datamoldParams.DstPath) + } + + if datamoldParams.JsonSize != 0 { + logrus.Info("start json generation") + if err := semistructured.GenerateRandomJSON(datamoldParams.DstPath, datamoldParams.JsonSize); err != nil { + logrus.Error("failed to generate json") + return err + } + logrus.Infof("successfully generated json : %s", datamoldParams.DstPath) + } + + if datamoldParams.XmlSize != 0 { + logrus.Info("start xml generation") + if err := semistructured.GenerateRandomXML(datamoldParams.DstPath, datamoldParams.XmlSize); err != nil { + logrus.Error("failed to generate xml") + return err + } + logrus.Infof("successfully generated xml : %s", datamoldParams.DstPath) + } + + if datamoldParams.TxtSize != 0 { + logrus.Info("start txt generation") + if err := unstructured.GenerateRandomTXT(datamoldParams.DstPath, datamoldParams.TxtSize); err != nil { + logrus.Error("failed to generate txt") + return err + } + logrus.Infof("successfully generated txt : %s", datamoldParams.DstPath) + } + + if datamoldParams.PngSize != 0 { + logrus.Info("start png generation") + if err := unstructured.GenerateRandomPNGImage(datamoldParams.DstPath, datamoldParams.PngSize); err != nil { + 
logrus.Error("failed to generate png") + return err + } + logrus.Infof("successfully generated png : %s", datamoldParams.DstPath) + } + + if datamoldParams.GifSize != 0 { + logrus.Info("start gif generation") + if err := unstructured.GenerateRandomGIF(datamoldParams.DstPath, datamoldParams.GifSize); err != nil { + logrus.Error("failed to generate gif") + return err + } + logrus.Infof("successfully generated gif : %s", datamoldParams.DstPath) + } + + if datamoldParams.ZipSize != 0 { + logrus.Info("start zip generation") + if err := unstructured.GenerateRandomZIP(datamoldParams.DstPath, datamoldParams.ZipSize); err != nil { + logrus.Error("failed to generate zip") + return err + } + logrus.Infof("successfully generated zip : %s", datamoldParams.DstPath) + } + return nil +} diff --git a/internal/logformatter/logformatter.go b/internal/log/log.go similarity index 63% rename from internal/logformatter/logformatter.go rename to internal/log/log.go index 42740a3..ed7a857 100644 --- a/internal/logformatter/logformatter.go +++ b/internal/log/log.go @@ -1,12 +1,25 @@ -package logformatter +package log import ( "fmt" + "io" + "os" "strings" "github.com/sirupsen/logrus" ) +func LogFile() { + logFile, err := os.OpenFile("./datamold.log", os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) + if err != nil { + logrus.WithError(err).Fatal("Failed to create log file") + } + + logrus.SetLevel(logrus.DebugLevel) + logrus.SetFormatter(&CustomTextFormatter{}) + logrus.SetOutput(io.MultiWriter(os.Stdout, logFile)) +} + type CustomTextFormatter struct { CmdName string JobName string diff --git a/pkg/dummy/semistructed/json.go b/pkg/dummy/semistructured/json.go similarity index 99% rename from pkg/dummy/semistructed/json.go rename to pkg/dummy/semistructured/json.go index 83208f0..c89b098 100644 --- a/pkg/dummy/semistructed/json.go +++ b/pkg/dummy/semistructured/json.go @@ -1,4 +1,4 @@ -package semistructed +package semistructured import ( "encoding/json" diff --git a/pkg/dummy/semistructed/semistructed_test.go b/pkg/dummy/semistructured/semistructed_test.go similarity index 61% rename from pkg/dummy/semistructed/semistructed_test.go rename to pkg/dummy/semistructured/semistructed_test.go index 8319c08..23dbff8 100644 --- a/pkg/dummy/semistructed/semistructed_test.go +++ b/pkg/dummy/semistructured/semistructed_test.go @@ -1,16 +1,16 @@ -package semistructed_test +package semistructured_test import ( "testing" "fmt" - "github.com/cloud-barista/cm-data-mold/pkg/dummy/semistructed" + "github.com/cloud-barista/cm-data-mold/pkg/dummy/semistructured" ) func TestJSON(t *testing.T) { // Enter the directory path and total data size (in GB) to store json dummy data - if err := semistructed.GenerateRandomJSON("json-dummy-directory-path", 1); err != nil { + if err := semistructured.GenerateRandomJSON("json-dummy-directory-path", 1); err != nil { fmt.Printf("test json error : %v", err) panic(err) } @@ -18,7 +18,7 @@ func TestJSON(t *testing.T) { func TestXML(t *testing.T) { // Enter the directory path and total data size in GB to store xml dummy data - if err := semistructed.GenerateRandomXML("xml-dummy-directory-path", 1); err != nil { + if err := semistructured.GenerateRandomXML("xml-dummy-directory-path", 1); err != nil { fmt.Printf("test xml error : %v", err) panic(err) } diff --git a/pkg/dummy/semistructed/xml.go b/pkg/dummy/semistructured/xml.go similarity index 99% rename from pkg/dummy/semistructed/xml.go rename to pkg/dummy/semistructured/xml.go index 987535e..8275516 100644 --- a/pkg/dummy/semistructed/xml.go +++ 
b/pkg/dummy/semistructured/xml.go @@ -1,4 +1,4 @@ -package semistructed +package semistructured import ( "encoding/xml" diff --git a/pkg/dummy/structed/csv.go b/pkg/dummy/structured/csv.go similarity index 99% rename from pkg/dummy/structed/csv.go rename to pkg/dummy/structured/csv.go index 1500ebf..65d038c 100644 --- a/pkg/dummy/structed/csv.go +++ b/pkg/dummy/structured/csv.go @@ -1,4 +1,4 @@ -package structed +package structured import ( "encoding/csv" diff --git a/pkg/dummy/structed/sql.go b/pkg/dummy/structured/sql.go similarity index 99% rename from pkg/dummy/structed/sql.go rename to pkg/dummy/structured/sql.go index e187beb..018f4cf 100644 --- a/pkg/dummy/structed/sql.go +++ b/pkg/dummy/structured/sql.go @@ -1,4 +1,4 @@ -package structed +package structured import ( "bytes" diff --git a/pkg/dummy/structed/structed_test.go b/pkg/dummy/structured/structed_test.go similarity index 55% rename from pkg/dummy/structed/structed_test.go rename to pkg/dummy/structured/structed_test.go index 36acb63..80ea4a3 100644 --- a/pkg/dummy/structed/structed_test.go +++ b/pkg/dummy/structured/structed_test.go @@ -1,4 +1,4 @@ -package structed_test +package structured_test import ( "path/filepath" @@ -6,12 +6,12 @@ import ( "fmt" - "github.com/cloud-barista/cm-data-mold/pkg/dummy/structed" + "github.com/cloud-barista/cm-data-mold/pkg/dummy/structured" ) func TestCSV(t *testing.T) { // Enter the directory path and total data size in GB to store csv dummy data - if err := structed.GenerateRandomCSV(filepath.Join("csv-dummy-directory-path", "csv"), 100); err != nil { + if err := structured.GenerateRandomCSV(filepath.Join("csv-dummy-directory-path", "csv"), 100); err != nil { fmt.Printf("test csv error : %v", err) panic(err) } @@ -20,7 +20,7 @@ func TestCSV(t *testing.T) { func TestSQL(t *testing.T) { // Enter the directory path and total data size in GB to store sql dummy data - if err := structed.GenerateRandomSQL(filepath.Join("sql-dummy-directory-path", "sql"), 100); err != nil { + if err := structured.GenerateRandomSQL(filepath.Join("sql-dummy-directory-path", "sql"), 100); err != nil { fmt.Printf("test sql error : %v", err) panic(err) } diff --git a/pkg/dummy/unstructed/gif.go b/pkg/dummy/unstructured/gif.go similarity index 99% rename from pkg/dummy/unstructed/gif.go rename to pkg/dummy/unstructured/gif.go index da4f465..0ca24f8 100644 --- a/pkg/dummy/unstructed/gif.go +++ b/pkg/dummy/unstructured/gif.go @@ -1,4 +1,4 @@ -package unstructed +package unstructured import ( "fmt" diff --git a/pkg/dummy/unstructed/img.go b/pkg/dummy/unstructured/img.go similarity index 98% rename from pkg/dummy/unstructed/img.go rename to pkg/dummy/unstructured/img.go index 02f9721..9a58006 100644 --- a/pkg/dummy/unstructed/img.go +++ b/pkg/dummy/unstructured/img.go @@ -1,4 +1,4 @@ -package unstructed +package unstructured import ( "fmt" diff --git a/pkg/dummy/unstructed/txt.go b/pkg/dummy/unstructured/txt.go similarity index 98% rename from pkg/dummy/unstructed/txt.go rename to pkg/dummy/unstructured/txt.go index 6f05a88..71217ad 100644 --- a/pkg/dummy/unstructed/txt.go +++ b/pkg/dummy/unstructured/txt.go @@ -1,4 +1,4 @@ -package unstructed +package unstructured import ( "fmt" diff --git a/pkg/dummy/unstructed/unstructed_test.go b/pkg/dummy/unstructured/unstructed_test.go similarity index 61% rename from pkg/dummy/unstructed/unstructed_test.go rename to pkg/dummy/unstructured/unstructed_test.go index 2c1195c..ee0d720 100644 --- a/pkg/dummy/unstructed/unstructed_test.go +++ b/pkg/dummy/unstructured/unstructed_test.go @@ 
-1,15 +1,15 @@ -package unstructed_test +package unstructured_test import ( "fmt" "testing" - "github.com/cloud-barista/cm-data-mold/pkg/dummy/unstructed" + "github.com/cloud-barista/cm-data-mold/pkg/dummy/unstructured" ) func TestIMG(t *testing.T) { // Enter the directory path and total data size, in GB, to store the img dummy data - if err := unstructed.GenerateRandomPNGImage("img-dummy-directory-path", 1); err != nil { + if err := unstructured.GenerateRandomPNGImage("img-dummy-directory-path", 1); err != nil { fmt.Printf("test img error : %v", err) panic(err) } @@ -17,7 +17,7 @@ func TestIMG(t *testing.T) { func TestGIF(t *testing.T) { // Enter the directory path and total data size in GB to store gif dummy data - if err := unstructed.GenerateRandomPNGImage("gif-dummy-directory-path", 1); err != nil { + if err := unstructured.GenerateRandomPNGImage("gif-dummy-directory-path", 1); err != nil { fmt.Printf("test gif error : %v", err) panic(err) } @@ -25,7 +25,7 @@ func TestGIF(t *testing.T) { func TestTXT(t *testing.T) { // Enter the directory path and total data size, in GB, to store txt dummy data - if err := unstructed.GenerateRandomTXT("txt-dummy-directory-path", 1); err != nil { + if err := unstructured.GenerateRandomTXT("txt-dummy-directory-path", 1); err != nil { fmt.Printf("test txt error : %v", err) panic(err) } @@ -33,7 +33,7 @@ func TestTXT(t *testing.T) { func TestZIP(t *testing.T) { // Enter the directory path and total data size in GB to store zip dummy data - if err := unstructed.GenerateRandomTXT("zip-dummy-directory-path", 1); err != nil { + if err := unstructured.GenerateRandomTXT("zip-dummy-directory-path", 1); err != nil { fmt.Printf("test zip error : %v", err) panic(err) } diff --git a/pkg/dummy/unstructed/zip.go b/pkg/dummy/unstructured/zip.go similarity index 99% rename from pkg/dummy/unstructed/zip.go rename to pkg/dummy/unstructured/zip.go index 072d56c..0cd6bcc 100644 --- a/pkg/dummy/unstructed/zip.go +++ b/pkg/dummy/unstructured/zip.go @@ -1,4 +1,4 @@ -package unstructed +package unstructured import ( "archive/zip" diff --git a/pkg/objectstorage/gcsfs/gcsfs.go b/pkg/objectstorage/gcpfs/gcpfs.go similarity index 85% rename from pkg/objectstorage/gcsfs/gcsfs.go rename to pkg/objectstorage/gcpfs/gcpfs.go index 054e532..1c5cb70 100644 --- a/pkg/objectstorage/gcsfs/gcsfs.go +++ b/pkg/objectstorage/gcpfs/gcpfs.go @@ -1,4 +1,4 @@ -package gcsfs +package gcpfs import ( "context" @@ -9,7 +9,7 @@ import ( "google.golang.org/api/iterator" ) -type GCSfs struct { +type GCPfs struct { provider utils.Provider projectID string bucketName string @@ -21,7 +21,7 @@ type GCSfs struct { } // Creating a Bucket -func (f *GCSfs) CreateBucket() error { +func (f *GCPfs) CreateBucket() error { _, err := f.bktclient.Attrs(f.ctx) if err != nil { if err == storage.ErrBucketNotExist { @@ -38,7 +38,7 @@ func (f *GCSfs) CreateBucket() error { // Delete Bucket // // Check and delete all objects in the bucket and delete the bucket -func (f *GCSfs) DeleteBucket() error { +func (f *GCPfs) DeleteBucket() error { iter := f.bktclient.Objects(f.ctx, &storage.Query{}) for { attr, err := iter.Next() @@ -57,7 +57,7 @@ func (f *GCSfs) DeleteBucket() error { } // Open function -func (f *GCSfs) Open(name string) (io.ReadCloser, error) { +func (f *GCPfs) Open(name string) (io.ReadCloser, error) { r, err := f.bktclient.Object(name).NewReader(f.ctx) if err != nil { return nil, err @@ -66,12 +66,12 @@ func (f *GCSfs) Open(name string) (io.ReadCloser, error) { } // Create function -func (f *GCSfs) Create(name 
string) (io.WriteCloser, error) { +func (f *GCPfs) Create(name string) (io.WriteCloser, error) { return f.bktclient.Object(name).NewWriter(f.ctx), nil } // Look up the list of objects in your bucket -func (f *GCSfs) ObjectList() ([]*utils.Object, error) { +func (f *GCPfs) ObjectList() ([]*utils.Object, error) { var objList []*utils.Object it := f.bktclient.Objects(f.ctx, nil) for { @@ -95,8 +95,8 @@ func (f *GCSfs) ObjectList() ([]*utils.Object, error) { return objList, nil } -func New(client *storage.Client, projectID, bucketName string, region string) *GCSfs { - gfs := &GCSfs{ +func New(client *storage.Client, projectID, bucketName string, region string) *GCPfs { + gfs := &GCPfs{ ctx: context.TODO(), bucketName: bucketName, client: client, diff --git a/service/osc/osc_test.go b/service/osc/osc_test.go index 325413e..724beda 100644 --- a/service/osc/osc_test.go +++ b/service/osc/osc_test.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/cloud-barista/cm-data-mold/pkg/objectstorage/gcsfs" + "github.com/cloud-barista/cm-data-mold/pkg/objectstorage/gcpfs" "github.com/cloud-barista/cm-data-mold/pkg/objectstorage/s3fs" "github.com/cloud-barista/cm-data-mold/pkg/utils" "github.com/cloud-barista/cm-data-mold/service/osc" @@ -38,7 +38,7 @@ func TestMain(m *testing.M) { panic(err) } - // s3 to gcs + // s3 to gcp if err := awsosc.Copy(gcposc); err != nil { panic(err) } @@ -99,5 +99,5 @@ func GCPInfo(projectID, credentialsFile, region, bucketName string) (*osc.OSCont return nil, err } - return osc.New(gcsfs.New(client, projectID, bucketName, region)) + return osc.New(gcpfs.New(client, projectID, bucketName, region)) } diff --git a/web/js/scripts.js b/web/js/scripts.js index 05d69bc..ab32f48 100644 --- a/web/js/scripts.js +++ b/web/js/scripts.js @@ -51,7 +51,7 @@ function generateFormSubmit() { console.log(url); let req; - if (target == "gcs" || target == "firestore") { + if (target == "gcp" || target == "firestore") { req = { method: 'POST', body: payload }; } else { req = { method: 'POST', body: jsonData }; diff --git a/web/templates/content.html b/web/templates/content.html index 5db4283..378dd03 100644 --- a/web/templates/content.html +++ b/web/templates/content.html @@ -19,12 +19,12 @@ {{ template "gen-s3.html" . }} {{ end }} - {{ if eq .Content "Generate-GCS" }} - {{ template "gen-gcs.html" . }} + {{ if eq .Content "Generate-GCP" }} + {{ template "gen-gcp.html" . }} {{ end }} - {{ if eq .Content "Generate-NCS" }} - {{ template "gen-ncs.html" . }} + {{ if eq .Content "Generate-NCP" }} + {{ template "gen-ncp.html" . }} {{ end }} {{ if eq .Content "Generate-MySQL" }} @@ -50,24 +50,24 @@ {{ template "mig-linuxToS3.html" . }} {{ end }} - {{ if eq .Content "Migration-Linux-GCS" }} - {{ template "mig-linuxToGcs.html" . }} + {{ if eq .Content "Migration-Linux-GCP" }} + {{ template "mig-linuxToGcp.html" . }} {{ end }} - {{ if eq .Content "Migration-Linux-NCS" }} - {{ template "mig-linuxToNcs.html" . }} + {{ if eq .Content "Migration-Linux-NCP" }} + {{ template "mig-linuxToNcp.html" . }} {{ end }} {{ if eq .Content "Migration-Windows-S3" }} {{ template "mig-windowsToS3.html" . }} {{ end }} - {{ if eq .Content "Migration-Windows-GCS" }} - {{ template "mig-windowsToGcs.html" . }} + {{ if eq .Content "Migration-Windows-GCP" }} + {{ template "mig-windowsToGcp.html" . }} {{ end }} - {{ if eq .Content "Migration-Windows-NCS" }} - {{ template "mig-windowsToNcs.html" . 
}} + {{ if eq .Content "Migration-Windows-NCP" }} + {{ template "mig-windowsToNcp.html" . }} {{ end }} {{ if eq .Content "Migration-MySQL" }} @@ -85,46 +85,46 @@ {{ template "mig-s3ToWindows.html" . }} {{ end }} - {{ if eq .Content "Migration-S3-GCS" }} - {{ template "mig-s3ToGcs.html" . }} + {{ if eq .Content "Migration-S3-GCP" }} + {{ template "mig-s3ToGcp.html" . }} {{ end }} - {{ if eq .Content "Migration-S3-NCS" }} - {{ template "mig-s3ToNcs.html" . }} + {{ if eq .Content "Migration-S3-NCP" }} + {{ template "mig-s3ToNcp.html" . }} {{ end }} - {{ if eq .Content "Migration-GCS-Linux" }} - {{ template "mig-gcsToLinux.html" . }} + {{ if eq .Content "Migration-GCP-Linux" }} + {{ template "mig-gcpToLinux.html" . }} {{ end }} - {{ if eq .Content "Migration-GCS-Windows" }} - {{ template "mig-gcsToWindows.html" . }} + {{ if eq .Content "Migration-GCP-Windows" }} + {{ template "mig-gcpToWindows.html" . }} {{ end }} - {{ if eq .Content "Migration-GCS-S3" }} - {{ template "mig-gcsToS3.html" . }} + {{ if eq .Content "Migration-GCP-S3" }} + {{ template "mig-gcpToS3.html" . }} {{ end }} - {{ if eq .Content "Migration-GCS-NCS" }} - {{ template "mig-gcsToNcs.html" . }} + {{ if eq .Content "Migration-GCP-NCP" }} + {{ template "mig-gcpToNcp.html" . }} {{ end }} - {{ if eq .Content "Migration-NCS-Linux" }} - {{ template "mig-ncsToLinux.html" . }} + {{ if eq .Content "Migration-NCP-Linux" }} + {{ template "mig-ncpToLinux.html" . }} {{ end }} - {{ if eq .Content "Migration-NCS-Windows" }} - {{ template "mig-ncsToWindows.html" . }} + {{ if eq .Content "Migration-NCP-Windows" }} + {{ template "mig-ncpToWindows.html" . }} {{ end }} - {{ if eq .Content "Migration-NCS-S3" }} - {{ template "mig-ncsToS3.html" . }} + {{ if eq .Content "Migration-NCP-S3" }} + {{ template "mig-ncpToS3.html" . }} {{ end }} - {{ if eq .Content "Migration-NCS-GCS" }} - {{ template "mig-ncsToGcs.html" . }} + {{ if eq .Content "Migration-NCP-GCP" }} + {{ template "mig-ncpToGcp.html" . }} {{ end }} diff --git a/web/templates/gen-firestore.html b/web/templates/gen-firestore.html index 53baedc..b96a4eb 100644 --- a/web/templates/gen-firestore.html +++ b/web/templates/gen-firestore.html @@ -29,7 +29,7 @@

데이터 생성 Firestore (Data generation: Firestore)
diff --git a/web/templates/gen-gcs.html b/web/templates/gen-gcp.html similarity index 84% rename from web/templates/gen-gcs.html rename to web/templates/gen-gcp.html index dc832a4..92b6322 100644 --- a/web/templates/gen-gcs.html +++ b/web/templates/gen-gcp.html @@ -1,4 +1,4 @@ -{{ define "gen-gcs.html" }} +{{ define "gen-gcp.html" }}

데이터 생성 GCP (Data generation: GCP)
- {{ range $index, $value := .Regions }} {{ end }}
@@ -50,7 +50,7 @@
데이터 생성 GCP (Data generation: GCP)
{{ template "gen-data.html" . }}
diff --git a/web/templates/gen-ncs.html b/web/templates/gen-ncp.html similarity index 74% rename from web/templates/gen-ncs.html rename to web/templates/gen-ncp.html index ff997cb..0971530 100644 --- a/web/templates/gen-ncs.html +++ b/web/templates/gen-ncp.html @@ -1,4 +1,4 @@ -{{ define "gen-ncs.html" }} +{{ define "gen-ncp.html" }}

데이터 생성 NCP (Data generation: NCP)