From 301452f8b1f3b4c13110e0e544d1357c603b5968 Mon Sep 17 00:00:00 2001
From: tykim96
Date: Fri, 3 Nov 2023 11:17:05 +0900
Subject: [PATCH 1/5] refactor: Move dummy-data packages into pkg/dummy

The semistructed, structed, and unstructed packages were mistakenly
uploaded directly under pkg; move them into pkg/dummy.
---
 pkg/{ => dummy}/semistructed/json.go | 0
 pkg/{ => dummy}/semistructed/xml.go  | 0
 pkg/{ => dummy}/structed/csv.go      | 0
 pkg/{ => dummy}/structed/sql.go      | 0
 pkg/{ => dummy}/unstructed/gif.go    | 0
 pkg/{ => dummy}/unstructed/img.go    | 0
 pkg/{ => dummy}/unstructed/txt.go    | 0
 pkg/{ => dummy}/unstructed/zip.go    | 0
 8 files changed, 0 insertions(+), 0 deletions(-)
 rename pkg/{ => dummy}/semistructed/json.go (100%)
 rename pkg/{ => dummy}/semistructed/xml.go (100%)
 rename pkg/{ => dummy}/structed/csv.go (100%)
 rename pkg/{ => dummy}/structed/sql.go (100%)
 rename pkg/{ => dummy}/unstructed/gif.go (100%)
 rename pkg/{ => dummy}/unstructed/img.go (100%)
 rename pkg/{ => dummy}/unstructed/txt.go (100%)
 rename pkg/{ => dummy}/unstructed/zip.go (100%)

diff --git a/pkg/semistructed/json.go b/pkg/dummy/semistructed/json.go
similarity index 100%
rename from pkg/semistructed/json.go
rename to pkg/dummy/semistructed/json.go
diff --git a/pkg/semistructed/xml.go b/pkg/dummy/semistructed/xml.go
similarity index 100%
rename from pkg/semistructed/xml.go
rename to pkg/dummy/semistructed/xml.go
diff --git a/pkg/structed/csv.go b/pkg/dummy/structed/csv.go
similarity index 100%
rename from pkg/structed/csv.go
rename to pkg/dummy/structed/csv.go
diff --git a/pkg/structed/sql.go b/pkg/dummy/structed/sql.go
similarity index 100%
rename from pkg/structed/sql.go
rename to pkg/dummy/structed/sql.go
diff --git a/pkg/unstructed/gif.go b/pkg/dummy/unstructed/gif.go
similarity index 100%
rename from pkg/unstructed/gif.go
rename to pkg/dummy/unstructed/gif.go
diff --git a/pkg/unstructed/img.go b/pkg/dummy/unstructed/img.go
similarity index 100%
rename from pkg/unstructed/img.go
rename to pkg/dummy/unstructed/img.go
diff --git a/pkg/unstructed/txt.go b/pkg/dummy/unstructed/txt.go
similarity index 100%
rename from pkg/unstructed/txt.go
rename to pkg/dummy/unstructed/txt.go
diff --git a/pkg/unstructed/zip.go b/pkg/dummy/unstructed/zip.go
similarity index 100%
rename from pkg/unstructed/zip.go
rename to pkg/dummy/unstructed/zip.go

From 22e3ee50aedfbd93bfc2ed91786be88fe3a8a21a Mon Sep 17 00:00:00 2001
From: tykim96
Date: Fri, 3 Nov 2023 11:18:17 +0900
Subject: [PATCH 2/5] feat: Add Object Storage Controller

---
 service/osc/copy.go |  87 +++++++++++++++++++++++
 service/osc/get.go  | 167 ++++++++++++++++++++++++++++++++++++++++++++
 service/osc/osc.go  |  57 +++++++++++++++
 service/osc/put.go  | 159 +++++++++++++++++++++++++++++++++++++++++
 4 files changed, 470 insertions(+)
 create mode 100644 service/osc/copy.go
 create mode 100644 service/osc/get.go
 create mode 100644 service/osc/osc.go
 create mode 100644 service/osc/put.go

diff --git a/service/osc/copy.go b/service/osc/copy.go
new file mode 100644
index 0000000..c48853f
--- /dev/null
+++ b/service/osc/copy.go
@@ -0,0 +1,87 @@
+package osc
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/cloud-barista/cm-data-mold/pkg/utils"
+)
+
+func (src *OSController) Copy(dst *OSController) error {
+	if err := dst.osfs.CreateBucket(); err != nil {
+		return err
+	}
+
+	objList, err := src.osfs.ObjectList()
+	if err != nil {
+		return err
+	}
+
+	// Buffered channels keep the workers from blocking on error reports
+	// while the job loop is still sending (mirrors MGet/MPut).
+	jobs := make(chan utils.Object, len(objList))
+	resultChan := make(chan error, len(objList))
+
+	var wg sync.WaitGroup
+	for i := 0; i < src.threads; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			copyWorker(src, dst, jobs, resultChan)
+		}()
+	}
+
+	for _, obj := range objList {
+		jobs <- *obj
+	}
+	close(jobs)
+
+	go func() {
+		wg.Wait()
+		close(resultChan)
+	}()
+
+	for err := range resultChan {
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func copyWorker(src *OSController, dst *OSController, jobs chan utils.Object, resultChan chan<- error) {
+	for obj := range jobs {
+		srcFile, err := src.osfs.Open(obj.Key)
+		if err != nil {
+			resultChan <- err
+			continue
+		}
+
+		fmt.Println(obj.Key)
+		dstFile, err := dst.osfs.Create(obj.Key)
+		if err != nil {
+			srcFile.Close()
+			resultChan <- err
+			continue
+		}
+
+		n, err := io.Copy(dstFile, srcFile)
+		if err != nil {
+			srcFile.Close()
+			dstFile.Close()
+			resultChan <- err
+			continue
+		}
+
+		if n != obj.Size {
+			srcFile.Close()
+			dstFile.Close()
+			resultChan <- errors.New("copy failed: size mismatch")
+			continue
+		}
+
+		if err := srcFile.Close(); err != nil {
+			resultChan <- err
+			continue
+		}
+
+		if err := dstFile.Close(); err != nil {
+			resultChan <- err
+			continue
+		}
+	}
+}
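Copy above drives a bounded worker pool: objects are fanned out over a jobs channel to `threads` goroutines, and a result channel is drained once the pool winds down. A minimal, self-contained sketch of that pattern follows; every name in it is illustrative rather than part of the patch:

package main

import (
	"fmt"
	"sync"
)

func main() {
	jobs := make(chan string, 3)   // buffered, like Copy's jobs channel
	results := make(chan error, 3) // workers never block when reporting

	var wg sync.WaitGroup
	for i := 0; i < 2; i++ { // two workers, mirroring OSController.threads
		wg.Add(1)
		go func() {
			defer wg.Done()
			for key := range jobs {
				fmt.Println("copy", key) // stand-in for the per-object copy
				results <- nil
			}
		}()
	}

	for _, key := range []string{"a.txt", "b.txt", "c.txt"} {
		jobs <- key
	}
	close(jobs) // lets the workers' range loops finish

	go func() {
		wg.Wait()
		close(results) // ends the drain loop below
	}()

	for err := range results {
		if err != nil {
			fmt.Println("worker error:", err)
		}
	}
}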
diff --git a/service/osc/get.go b/service/osc/get.go
new file mode 100644
index 0000000..26364e7
--- /dev/null
+++ b/service/osc/get.go
@@ -0,0 +1,167 @@
+package osc
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"github.com/cloud-barista/cm-data-mold/pkg/utils"
+)
+
+func (osc *OSController) MGet(dirPath string) error {
+	if fileExists(dirPath) {
+		return errors.New("download path is a file, not a directory")
+	}
+
+	err := os.MkdirAll(dirPath, 0755)
+	if err != nil {
+		return err
+	}
+
+	var fileList []*utils.Object
+
+	err = filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if !info.IsDir() {
+			fileList = append(fileList, &utils.Object{
+				ChecksumAlgorithm: []string{},
+				ETag:              "",
+				Key:               path,
+				LastModified:      info.ModTime(),
+				Size:              info.Size(),
+				StorageClass:      "Standard",
+			})
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return err
+	}
+
+	objList, err := osc.osfs.ObjectList()
+	if err != nil {
+		return err
+	}
+
+	downloadList := getDownloadList(fileList, objList, dirPath)
+
+	jobs := make(chan utils.Object, len(downloadList))
+	resultChan := make(chan error, len(downloadList))
+
+	var wg sync.WaitGroup
+	for i := 0; i < osc.threads; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			mGetWorker(osc, dirPath, jobs, resultChan)
+		}()
+	}
+
+	for _, obj := range downloadList {
+		jobs <- *obj
+	}
+	close(jobs)
+
+	go func() {
+		wg.Wait()
+		close(resultChan)
+	}()
+
+	for err := range resultChan {
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// getDownloadList returns the objects that are missing locally or whose
+// size differs from the local copy.
+func getDownloadList(fileList, objList []*utils.Object, dirPath string) []*utils.Object {
+	downloadList := []*utils.Object{}
+
+	for _, obj := range objList {
+		chk := false
+		for _, file := range fileList {
+			fileName, _ := filepath.Rel(dirPath, file.Key)
+			if obj.Key == strings.ReplaceAll(fileName, "\\", "/") {
+				chk = true
+				if obj.Size != file.Size {
+					downloadList = append(downloadList, obj)
+				}
+				break
+			}
+		}
+		if !chk {
+			downloadList = append(downloadList, obj)
+		}
+	}
+
+	return downloadList
+}
+
+func combinePaths(basePath, relativePath string) (string, error) {
+	bName := filepath.Base(basePath)
+
+	parts := strings.Split(relativePath, "/")
+	if bName == parts[0] {
+		return filepath.Join(basePath, strings.Join(parts[1:], "/")), nil
+	}
+	return filepath.Join(basePath, relativePath), nil
+}
+
+func mGetWorker(osc *OSController, dirPath string, jobs chan utils.Object, resultChan chan<- error) {
+	for obj := range jobs {
+		src, err := osc.osfs.Open(obj.Key)
+		if err != nil {
+			resultChan <- err
+			continue
+		}
+
+		fileName, err := combinePaths(dirPath, obj.Key)
+		if err != nil {
+			src.Close()
+			resultChan <- err
+			continue
+		}
+
+		err = os.MkdirAll(filepath.Dir(fileName), 0755)
+		if err != nil {
+			src.Close()
+			resultChan <- err
+			continue
+		}
+
+		fmt.Println(fileName)
+
+		dst, err := os.Create(fileName)
+		if err != nil {
+			src.Close()
+			resultChan <- err
+			continue
+		}
+
+		n, err := io.Copy(dst, src)
+		if err != nil {
+			src.Close()
+			dst.Close()
+			resultChan <- err
+			continue
+		}
+
+		if n != obj.Size {
+			src.Close()
+			dst.Close()
+			resultChan <- errors.New("get failed: size mismatch")
+			continue
+		}
+
+		dst.Close()
+		src.Close()
+
+		resultChan <- nil
+	}
+}
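getDownloadList compares remote object keys against local paths by converting each file path to a forward-slash relative key, which is what makes the size comparison work on Windows as well as Unix. A small runnable sketch of that normalization (the helper name is illustrative):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// toObjectKey converts an OS-specific file path under root into the
// forward-slash object key that getDownloadList compares against.
func toObjectKey(root, path string) (string, error) {
	rel, err := filepath.Rel(root, path)
	if err != nil {
		return "", err
	}
	return strings.ReplaceAll(rel, "\\", "/"), nil
}

func main() {
	key, err := toObjectKey("data", filepath.Join("data", "sub", "file.txt"))
	if err != nil {
		panic(err)
	}
	fmt.Println(key) // prints: sub/file.txt on every platform
}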
diff --git a/service/osc/osc.go b/service/osc/osc.go
new file mode 100644
index 0000000..de61ef4
--- /dev/null
+++ b/service/osc/osc.go
@@ -0,0 +1,57 @@
+package osc
+
+import (
+	"io"
+
+	"github.com/cloud-barista/cm-data-mold/pkg/utils"
+)
+
+// OSFS is the storage-backend abstraction the controller drives.
+// Each object storage provider implements bucket management, listing,
+// and streaming reads/writes of single objects.
+type OSFS interface {
+	CreateBucket() error
+	DeleteBucket() error
+	ObjectList() ([]*utils.Object, error)
+
+	Open(name string) (io.ReadCloser, error)
+	Create(name string) (io.WriteCloser, error)
+}
+
+type OSController struct {
+	osfs OSFS
+
+	threads int
+}
+
+func (osc *OSController) CreateBucket() error {
+	return osc.osfs.CreateBucket()
+}
+
+func (osc *OSController) DeleteBucket() error {
+	return osc.osfs.DeleteBucket()
+}
+
+func (osc *OSController) ObjectList() ([]*utils.Object, error) {
+	return osc.osfs.ObjectList()
+}
+
+type Option func(*OSController)
+
+// WithThreads sets the worker count; values below 1 are ignored.
+func WithThreads(count int) Option {
+	return func(o *OSController) {
+		if count >= 1 {
+			o.threads = count
+		}
+	}
+}
+
+func New(osfs OSFS, opts ...Option) (*OSController, error) {
+	osc := &OSController{
+		osfs:    osfs,
+		threads: 10,
+	}
+
+	for _, opt := range opts {
+		opt(osc)
+	}
+
+	return osc, nil
+}
diff --git a/service/osc/put.go b/service/osc/put.go
new file mode 100644
index 0000000..9abcb10
--- /dev/null
+++ b/service/osc/put.go
@@ -0,0 +1,159 @@
+package osc
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"github.com/cloud-barista/cm-data-mold/pkg/utils"
+)
+
+// fileExists reports whether filePath exists and is a regular file.
+func fileExists(filePath string) bool {
+	if fi, err := os.Stat(filePath); err == nil {
+		return !fi.IsDir()
+	}
+	return false
+}
+
+func (osc *OSController) Put(filePath string) error {
+	if !fileExists(filePath) {
+		return errors.New("file does not exist")
+	}
+
+	src, err := os.Open(filePath)
+	if err != nil {
+		return err
+	}
+	defer src.Close()
+
+	dst, err := osc.osfs.Create(filepath.Base(filePath))
+	if err != nil {
+		return err
+	}
+	defer dst.Close()
+
+	n, err := io.Copy(dst, src)
+	if err != nil {
+		return err
+	}
+
+	sinfo, err := src.Stat()
+	if err != nil {
+		return err
+	}
+
+	if n != sinfo.Size() {
+		return errors.New("put failed: size mismatch")
+	}
+
+	return nil
+}
+
+func (osc *OSController) MPut(dirPath string) error {
+	if err := osc.osfs.CreateBucket(); err != nil {
+		return err
+	}
+
+	if fileExists(dirPath) {
+		return errors.New("upload path is a file, not a directory")
+	}
+
+	var objList []utils.Object
+
+	err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if !info.IsDir() {
+			objList = append(objList, utils.Object{
+				ChecksumAlgorithm: []string{},
+				ETag:              "",
+				Key:               path,
+				LastModified:      info.ModTime(),
+				Size:              info.Size(),
+				StorageClass:      "Standard",
+			})
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return err
+	}
+
+	jobs := make(chan utils.Object, len(objList))
+	resultChan := make(chan error, len(objList))
+
+	var wg sync.WaitGroup
+	for i := 0; i < osc.threads; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			mPutWorker(osc, dirPath, jobs, resultChan)
+		}()
+	}
+
+	for _, obj := range objList {
+		jobs <- obj
+	}
+	close(jobs)
+
+	go func() {
+		wg.Wait()
+		close(resultChan)
+	}()
+
+	for err := range resultChan {
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func mPutWorker(osc *OSController, dirPath string, jobs chan utils.Object, resultChan chan<- error) {
+	for obj := range jobs {
+		src, err := os.Open(obj.Key)
+		if err != nil {
+			resultChan <- err
+			continue
+		}
+
+		fileName, err := filepath.Rel(dirPath, obj.Key)
+		if err != nil {
+			src.Close()
+			resultChan <- err
+			continue
+		}
+		fileName = strings.ReplaceAll(filepath.Join(filepath.Base(dirPath), fileName), "\\", "/")
+
+		fmt.Println(fileName)
+
+		dst, err := osc.osfs.Create(fileName)
+		if err != nil {
+			src.Close()
+			resultChan <- err
+			continue
+		}
+
+		n, err := io.Copy(dst, src)
+		if err != nil {
+			src.Close()
+			dst.Close()
+			resultChan <- err
+			continue
+		}
+
+		if n != obj.Size {
+			src.Close()
+			dst.Close()
+			resultChan <- errors.New("put failed: size mismatch")
+			continue
+		}
+
+		dst.Close()
+		src.Close()
+
+		resultChan <- nil
+	}
+}
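The OSFS interface above is the seam for adding providers: anything that can create and delete a bucket, list objects, and stream single objects can back OSController. A hedged in-memory sketch of an implementation, useful for unit tests; it assumes only the utils.Object fields visible in this patch, and the memfs package name is illustrative:

package memfs

import (
	"bytes"
	"io"
	"time"

	"github.com/cloud-barista/cm-data-mold/pkg/utils"
)

// MemFS is an illustrative in-memory OSFS backend.
type MemFS struct {
	objects map[string][]byte
}

func New() *MemFS { return &MemFS{objects: map[string][]byte{}} }

func (m *MemFS) CreateBucket() error { return nil } // nothing to provision in memory
func (m *MemFS) DeleteBucket() error { m.objects = map[string][]byte{}; return nil }

func (m *MemFS) ObjectList() ([]*utils.Object, error) {
	list := make([]*utils.Object, 0, len(m.objects))
	for key, data := range m.objects {
		list = append(list, &utils.Object{
			Key:          key,
			Size:         int64(len(data)),
			LastModified: time.Now(),
			StorageClass: "Standard",
		})
	}
	return list, nil
}

func (m *MemFS) Open(name string) (io.ReadCloser, error) {
	return io.NopCloser(bytes.NewReader(m.objects[name])), nil
}

// memWriter buffers writes and commits the object on Close.
type memWriter struct {
	buf  bytes.Buffer
	done func([]byte)
}

func (w *memWriter) Write(p []byte) (int, error) { return w.buf.Write(p) }
func (w *memWriter) Close() error                { w.done(w.buf.Bytes()); return nil }

func (m *MemFS) Create(name string) (io.WriteCloser, error) {
	return &memWriter{done: func(b []byte) { m.objects[name] = b }}, nil
}

It would be wired up the same way as the real backends, e.g. ctl, err := osc.New(memfs.New(), osc.WithThreads(4)).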
From 7251f73444c9c01b421a45fa54f8f5532c612717 Mon Sep 17 00:00:00 2001
From: tykim96
Date: Fri, 3 Nov 2023 11:19:16 +0900
Subject: [PATCH 3/5] test: Add cm-data-mold test code

---
 pkg/dummy/semistructed/semistructed_test.go |  25 ++++
 pkg/dummy/structed/structed_test.go         |  27 ++++
 pkg/dummy/unstructed/unstructed_test.go     |  40 +++++
 service/nrdbc/nrdbc_test.go                 | 137 +++++++++++++++++
 service/osc/osc_test.go                     | 103 +++++++++++++
 service/rdbc/rdbc_test.go                   | 156 ++++++++++++++++++++
 6 files changed, 488 insertions(+)
 create mode 100644 pkg/dummy/semistructed/semistructed_test.go
 create mode 100644 pkg/dummy/structed/structed_test.go
 create mode 100644 pkg/dummy/unstructed/unstructed_test.go
 create mode 100644 service/nrdbc/nrdbc_test.go
 create mode 100644 service/osc/osc_test.go
 create mode 100644 service/rdbc/rdbc_test.go

diff --git a/pkg/dummy/semistructed/semistructed_test.go b/pkg/dummy/semistructed/semistructed_test.go
new file mode 100644
index 0000000..8319c08
--- /dev/null
+++ b/pkg/dummy/semistructed/semistructed_test.go
@@ -0,0 +1,25 @@
+package semistructed_test
+
+import (
+	"testing"
+
+	"github.com/cloud-barista/cm-data-mold/pkg/dummy/semistructed"
+)
+
+func TestJSON(t *testing.T) {
+	// Enter the directory path and total data size (in GB) to store json dummy data
+	if err := semistructed.GenerateRandomJSON("json-dummy-directory-path", 1); err != nil {
+		t.Fatalf("test json error : %v", err)
+	}
+}
+
+func TestXML(t *testing.T) {
+	// Enter the directory path and total data size in GB to store xml dummy data
+	if err := semistructed.GenerateRandomXML("xml-dummy-directory-path", 1); err != nil {
+		t.Fatalf("test xml error : %v", err)
+	}
+}
diff --git a/pkg/dummy/structed/structed_test.go b/pkg/dummy/structed/structed_test.go
new file mode 100644
index 0000000..36acb63
--- /dev/null
+++ b/pkg/dummy/structed/structed_test.go
@@ -0,0 +1,27 @@
+package structed_test
+
+import (
+	"path/filepath"
+	"testing"
+
+	"github.com/cloud-barista/cm-data-mold/pkg/dummy/structed"
+)
+
+func TestCSV(t *testing.T) {
+	// Enter the directory path and total data size in GB to store csv dummy data
+	if err := structed.GenerateRandomCSV(filepath.Join("csv-dummy-directory-path", "csv"), 100); err != nil {
+		t.Fatalf("test csv error : %v", err)
+	}
+}
+
+func TestSQL(t *testing.T) {
+	// Enter the directory path and total data size in GB to store sql dummy data
+	if err := structed.GenerateRandomSQL(filepath.Join("sql-dummy-directory-path", "sql"), 100); err != nil {
+		t.Fatalf("test sql error : %v", err)
+	}
+}
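The dummy-generator tests above write into placeholder paths that must be edited before running. When the exact location does not matter, a test-scoped temporary directory avoids leftover gigabytes; a hedged variant of TestCSV, assuming GenerateRandomCSV keeps the (path, size-in-GB) signature used here:

func TestCSVTempDir(t *testing.T) {
	dir := t.TempDir() // removed automatically when the test finishes
	if err := structed.GenerateRandomCSV(filepath.Join(dir, "csv"), 1); err != nil {
		t.Fatalf("test csv error : %v", err)
	}
}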
diff --git a/pkg/dummy/unstructed/unstructed_test.go b/pkg/dummy/unstructed/unstructed_test.go
new file mode 100644
index 0000000..2c1195c
--- /dev/null
+++ b/pkg/dummy/unstructed/unstructed_test.go
@@ -0,0 +1,40 @@
+package unstructed_test
+
+import (
+	"testing"
+
+	"github.com/cloud-barista/cm-data-mold/pkg/dummy/unstructed"
+)
+
+func TestIMG(t *testing.T) {
+	// Enter the directory path and total data size, in GB, to store the img dummy data
+	if err := unstructed.GenerateRandomPNGImage("img-dummy-directory-path", 1); err != nil {
+		t.Fatalf("test img error : %v", err)
+	}
+}
+
+func TestGIF(t *testing.T) {
+	// Enter the directory path and total data size in GB to store gif dummy data.
+	// GenerateRandomGIF is assumed to be the generator exported by gif.go; the
+	// original call reused the PNG generator, which looks like a copy-paste slip.
+	if err := unstructed.GenerateRandomGIF("gif-dummy-directory-path", 1); err != nil {
+		t.Fatalf("test gif error : %v", err)
+	}
+}
+
+func TestTXT(t *testing.T) {
+	// Enter the directory path and total data size, in GB, to store txt dummy data
+	if err := unstructed.GenerateRandomTXT("txt-dummy-directory-path", 1); err != nil {
+		t.Fatalf("test txt error : %v", err)
+	}
+}
+
+func TestZIP(t *testing.T) {
+	// Enter the directory path and total data size in GB to store zip dummy data.
+	// GenerateRandomZIP is assumed to be the generator exported by zip.go; the
+	// original call reused the TXT generator.
+	if err := unstructed.GenerateRandomZIP("zip-dummy-directory-path", 1); err != nil {
+		t.Fatalf("test zip error : %v", err)
+	}
+}
diff --git a/service/nrdbc/nrdbc_test.go b/service/nrdbc/nrdbc_test.go
new file mode 100644
index 0000000..8d2d01c
--- /dev/null
+++ b/service/nrdbc/nrdbc_test.go
@@ -0,0 +1,137 @@
+package nrdbc_test
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"testing"
+
+	"cloud.google.com/go/firestore"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/credentials"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
+	"github.com/cloud-barista/cm-data-mold/pkg/nrdbms/awsdnmdb"
+	"github.com/cloud-barista/cm-data-mold/pkg/nrdbms/gcpfsdb"
+	"github.com/cloud-barista/cm-data-mold/pkg/nrdbms/ncpmgdb"
+	"github.com/cloud-barista/cm-data-mold/service/nrdbc"
+	"go.mongodb.org/mongo-driver/mongo"
+	"go.mongodb.org/mongo-driver/mongo/options"
+	"google.golang.org/api/option"
+)
+
+// dynamo to firestore example
+func TestMain(m *testing.M) {
+	awsnrdbc, err := AWSInfo("your-aws-accessKey", "your-aws-secretKey", "your-aws-region")
+	if err != nil {
+		panic(err)
+	}
+
+	gcpnrdc, err := GCPInfo("your-gcp-projectID", "your-gcp-credentialsFile")
+	if err != nil {
+		panic(err)
+	}
+
+	var srcData []map[string]interface{}
+	err = json.Unmarshal([]byte(exJSON), &srcData)
+	if err != nil {
+		panic(err)
+	}
+
+	if err := awsnrdbc.Put("address", &srcData); err != nil {
+		panic(err)
+	}
+
+	var dstData []map[string]interface{}
+	if err := awsnrdbc.Get("address", &dstData); err != nil {
+		panic(err)
+	}
+
+	fmt.Println(dstData)
+
+	// dynamo to firestore
+	if err := awsnrdbc.Copy(gcpnrdc); err != nil {
+		panic(err)
+	}
+}
+
+func AWSInfo(accessKey, secretKey, region string) (*nrdbc.NRDBController, error) {
+	cfg, err := config.LoadDefaultConfig(context.TODO(),
+		config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(accessKey, secretKey, "")),
+		config.WithRegion(region),
+		config.WithRetryMaxAttempts(5),
+	)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return nrdbc.New(awsdnmdb.New(dynamodb.NewFromConfig(cfg), region))
+}
+
+func GCPInfo(projectID, credentialsFile string) (*nrdbc.NRDBController, error) {
+	client, err := firestore.NewClient(
+		context.TODO(),
+		projectID,
+		option.WithCredentialsFile(credentialsFile),
+	)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return nrdbc.New(gcpfsdb.New(client, "your-gcp-region"))
+}
+
+func NCPInfo(username, password, host, port, dbName string) (*nrdbc.NRDBController, error) {
+	dc := true
+	mongoClient, err := mongo.Connect(context.Background(), &options.ClientOptions{
+		Auth: &options.Credential{
+			Username: username,
+			Password: password,
+		},
+		Direct: &dc,
+		Hosts:  []string{fmt.Sprintf("%s:%s", host, port)},
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return nrdbc.New(ncpmgdb.New(mongoClient, dbName))
+}
+
+const exJSON string = `
+[
+	{
+		"addr_id": "DhiFnPGUcQUL8wgSkx1ky1hb",
+		"countryabr": "SE",
+		"street": "8350 West Loopbury",
+		"city": "St. Petersburg",
+		"state": "Idaho",
+		"zip": "58295",
+		"country": "Brazil",
+		"latitude": 43,
+		"longitude": -11
+	},
+	{
+		"addr_id": "Hf1ct5b8OCySK3OxiUET1nQ4",
+		"countryabr": "IN",
+		"street": "6258 New Viaberg",
+		"city": "Boston",
+		"state": "Mississippi",
+		"zip": "70833",
+		"country": "Lebanon",
+		"latitude": -42,
+		"longitude": 70
+	},
+	{
+		"addr_id": "s3suVD7d1hFUUz4ci6Z1TMAb",
+		"countryabr": "PL",
+		"street": "2497 Squarestad",
+		"city": "Fort Wayne",
+		"state": "Missouri",
+		"zip": "50560",
+		"country": "Hong Kong",
+		"latitude": 37,
+		"longitude": 123
+	}
+]`
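NCPInfo above assembles options.ClientOptions field by field; the mongo driver can also take the same settings as a connection URI, which is often easier to review. A sketch under the same parameters (credentials containing URI metacharacters would need url.QueryEscape first):

uri := fmt.Sprintf("mongodb://%s:%s@%s:%s/?directConnection=true", username, password, host, port)
mongoClient, err := mongo.Connect(context.Background(), options.Client().ApplyURI(uri))
if err != nil {
	return nil, err
}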
diff --git a/service/osc/osc_test.go b/service/osc/osc_test.go
new file mode 100644
index 0000000..325413e
--- /dev/null
+++ b/service/osc/osc_test.go
@@ -0,0 +1,103 @@
+package osc_test
+
+import (
+	"context"
+	"testing"
+
+	"cloud.google.com/go/storage"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/credentials"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/cloud-barista/cm-data-mold/pkg/objectstorage/gcsfs"
+	"github.com/cloud-barista/cm-data-mold/pkg/objectstorage/s3fs"
+	"github.com/cloud-barista/cm-data-mold/pkg/utils"
+	"github.com/cloud-barista/cm-data-mold/service/osc"
+	"google.golang.org/api/option"
+)
+
+// s3 to gcs example
+func TestMain(m *testing.M) {
+	awsosc, err := AWSInfo("your-aws-accessKey", "your-aws-secretKey", "your-aws-region", "your-aws-bucket-name")
+	if err != nil {
+		panic(err)
+	}
+
+	gcposc, err := GCPInfo("your-gcp-projectID", "your-gcp-credentialsFile", "your-gcp-region", "your-gcp-bucket-name")
+	if err != nil {
+		panic(err)
+	}
+
+	// aws import
+	if err := awsosc.MPut("your-upload-directory-path"); err != nil {
+		panic(err)
+	}
+
+	// aws export
+	if err := awsosc.MGet("your-download-directory-path"); err != nil {
+		panic(err)
+	}
+
+	// s3 to gcs
+	if err := awsosc.Copy(gcposc); err != nil {
+		panic(err)
+	}
+}
+
+func AWSInfo(accessKey, secretKey, region, bucketName string) (*osc.OSController, error) {
+	cfg, err := config.LoadDefaultConfig(context.TODO(),
+		config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(accessKey, secretKey, "")),
+		config.WithRegion(region),
+		config.WithRetryMaxAttempts(5),
+	)
+
+	if err != nil {
+		return nil, err
+	}
+
+	client := s3.NewFromConfig(cfg, func(o *s3.Options) { o.UsePathStyle = true })
+
+	return osc.New(s3fs.New(utils.AWS, client, bucketName, region))
+}
+
+func NCPInfo(accessKey, secretKey, endpoint, region, bucketName string) (*osc.OSController, error) {
+	customResolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
+		if service == "S3" {
+			return aws.Endpoint{
+				PartitionID:   "aws",
+				URL:           endpoint,
+				SigningRegion: region,
+			}, nil
+		}
+
+		return aws.Endpoint{}, &aws.EndpointNotFoundError{}
+	})
+
+	cfg, err := config.LoadDefaultConfig(context.TODO(),
+		config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(accessKey, secretKey, "")),
+		config.WithRegion(region),
+		config.WithRetryMaxAttempts(5),
+		config.WithEndpointResolver(customResolver),
+	)
+
+	if err != nil {
+		return nil, err
+	}
+
+	client := s3.NewFromConfig(cfg, func(o *s3.Options) { o.UsePathStyle = true })
+
+	return osc.New(s3fs.New(utils.AWS, client, bucketName, region))
+}
+
+func GCPInfo(projectID, credentialsFile, region, bucketName string) (*osc.OSController, error) {
+	client, err := storage.NewClient(
+		context.TODO(),
+		option.WithCredentialsFile(credentialsFile),
+	)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return osc.New(gcsfs.New(client, projectID, bucketName, region))
+}
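NCPInfo above routes the S3-compatible endpoint through aws.EndpointResolverFunc, which the SDK has since deprecated. If the pinned aws-sdk-go-v2 release is new enough to expose s3.Options.BaseEndpoint (an assumption worth checking against go.mod), the same thing is a per-client one-liner:

client := s3.NewFromConfig(cfg, func(o *s3.Options) {
	o.BaseEndpoint = aws.String(endpoint) // S3-compatible endpoint, e.g. NCP Object Storage
	o.UsePathStyle = true
})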
diff --git a/service/rdbc/rdbc_test.go b/service/rdbc/rdbc_test.go
new file mode 100644
index 0000000..57e4c83
--- /dev/null
+++ b/service/rdbc/rdbc_test.go
@@ -0,0 +1,156 @@
+package rdbc_test
+
+import (
+	"database/sql"
+	"fmt"
+	"testing"
+
+	// Registers the "mysql" driver that sql.Open below relies on
+	// (assumed to already be a go.mod dependency).
+	_ "github.com/go-sql-driver/mysql"
+
+	"github.com/cloud-barista/cm-data-mold/pkg/rdbms/mysql"
+	"github.com/cloud-barista/cm-data-mold/pkg/utils"
+	"github.com/cloud-barista/cm-data-mold/service/rdbc"
+)
+
+func TestMain(m *testing.M) {
+	// aws example
+	// RDBCInfo(utils.AWS,"admin","datamoldPassword","127.0.0.1","3306")
+	// gcp example
+	// RDBCInfo(utils.GCP,"root","datamoldPassword","127.0.0.1","3306")
+	// ncp example
+	// RDBCInfo(utils.NCP,"root","datamoldPassword","127.0.0.1","3306")
+	Srdbc := RDBCInfo("your-source-provider", "your-source-mysql-username", "your-source-mysql-password", "your-source-mysql-host", "your-source-mysql-port")
+	Drdbc := RDBCInfo("your-target-provider", "your-target-mysql-username", "your-target-mysql-password", "your-target-mysql-host", "your-target-mysql-port")
+
+	// Import LibraryManagement.sql into Srdbc
+	if err := Srdbc.Put(testSQL); err != nil {
+		panic(err)
+	}
+
+	// Export the LibraryManagement database from Srdbc
+	var dstSQL string
+	if err := Srdbc.Get("LibraryManagement", &dstSQL); err != nil {
+		panic(err)
+	}
+
+	// Migrate the Srdbc databases to Drdbc
+	if err := Srdbc.Copy(Drdbc); err != nil {
+		panic(err)
+	}
+}
+
+func RDBCInfo(providerType utils.Provider, username, password, host, port string) *rdbc.RDBController {
+	sqlDB, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:%s)/", username, password, host, port))
+	if err != nil {
+		panic(err)
+	}
+
+	if err := sqlDB.Ping(); err != nil {
+		panic(err)
+	}
+
+	rdbController, err := rdbc.New(mysql.New(providerType, sqlDB))
+	if err != nil {
+		panic(err)
+	}
+
+	return rdbController
+}
+
+const testSQL string = `CREATE DATABASE IF NOT EXISTS LibraryManagement;
+
+USE LibraryManagement;
+
+DROP TABLE IF EXISTS Books;
+
+CREATE TABLE Books (
+    BookID INT AUTO_INCREMENT,
+    Title VARCHAR(255),
+    Author VARCHAR(255),
+    PublicationYear INT,
+    Publisher VARCHAR(255),
+    Quantity INT,
+    PRIMARY KEY (BookID)
+);
+
+INSERT INTO Books (Title, Author, PublicationYear, Publisher, Quantity) VALUES ("tCEzLpdeW", "NseXeqlBh", 224, "MgOap", 254);
+
+INSERT INTO Books (Title, Author, PublicationYear, Publisher, Quantity) VALUES ("jEWuQzfTY", "YgUjwtNpHU", 220, "QSLCdwY", 157);
+
+INSERT INTO Books (Title, Author, PublicationYear, Publisher, Quantity) VALUES ("iMes", "ihodtYg", 211, "zjpjyMKlF", 228);
+
+INSERT INTO Books (Title, Author, PublicationYear, Publisher, Quantity) VALUES ("wSbEMUU", "iLmPOEas", 55, "TzbA", 62);
+
+INSERT INTO Books (Title, Author, PublicationYear, Publisher, Quantity) VALUES ("DNKqE", "Dajh", 214, "DjEUpBg", 255);
+
+INSERT INTO Books (Title, Author, PublicationYear, Publisher, Quantity) VALUES ("HuqfKx", "FqWSBOQs", 147, "pHZOJuB", 11);
+
+INSERT INTO Books (Title, Author, PublicationYear, Publisher, Quantity) VALUES ("TQVNDEdZ", "fWaE", 171, "EMhFiAHf", 245);
+
+INSERT INTO Books (Title, Author, PublicationYear, Publisher, Quantity) VALUES ("BTMvof", "YLsyBWXQhv", 170, "PMoIFLg", 22);
+
+INSERT INTO Books (Title, Author, PublicationYear, Publisher, Quantity) VALUES ("TsbXTGvs", "yvkJgFKTU", 14, "TivqAzaGAl", 215);
+
+DROP TABLE IF EXISTS Members;
+
+CREATE TABLE Members (
+    MemberID INT AUTO_INCREMENT,
+    Name VARCHAR(255),
+    Address VARCHAR(255),
+    PhoneNo VARCHAR(20),
+    Email VARCHAR(50),
+    JoinedDate DATE,
+    ExpiryDate DATE,
+    IsActive BOOLEAN DEFAULT 1,
+    PRIMARY KEY (MemberID)
+);
+
+INSERT INTO Members (Name, Address, PhoneNo, Email, JoinedDate, ExpiryDate, IsActive) VALUES ("YSzcg", "ZWgffcBi", "EcCnDjq", "GJMNruf", "1996-01-04", "2022-03-21", 0);
+
+INSERT INTO Members (Name, Address, PhoneNo, Email, JoinedDate, ExpiryDate, IsActive) VALUES ("qMmHPvGXtj", "qVNJrmdG", "XSArhl", "mDFFwsQJg", "1966-10-08", "1997-10-14", 0);
+
+INSERT INTO Members (Name, Address, PhoneNo, Email, JoinedDate, ExpiryDate, IsActive) VALUES ("SIbQzF", "QEfwEpVA", "xjAGI", "IpcaUF", "1931-09-28", "1902-10-23", 0);
+
+INSERT INTO Members (Name, Address, PhoneNo, Email, JoinedDate, ExpiryDate, IsActive) VALUES ("vxnpMWCNz", "Cnpd", "FRjSjXsHLy", "DyVpKicQ", "1992-10-07", "1917-09-29", 1);
+
+INSERT INTO Members (Name, Address, PhoneNo, Email, JoinedDate, ExpiryDate, IsActive) VALUES ("fISmWpzwun", "VznGPW", "pnWIj", "CzBfTjMHQ", "1972-04-25", "1922-07-30", 1);
+
+INSERT INTO Members (Name, Address, PhoneNo, Email, JoinedDate, ExpiryDate, IsActive) VALUES ("gdyeW", "xzYkFQFNE", "GJaAPzRS", "jbsXGHBnP", "1968-12-29", "1907-08-22", 1);
+
+INSERT INTO Members (Name, Address, PhoneNo, Email, JoinedDate, ExpiryDate, IsActive) VALUES ("pGHvPVrqS", "CGRZRIxYF", "ybitYUs", "HTOJUEGEmy", "1964-05-30", "1992-03-30", 1);
+
+INSERT INTO Members (Name, Address, PhoneNo, Email, JoinedDate, ExpiryDate, IsActive) VALUES ("eFeyV", "qWJQAPYU", "uPOv", "RhCYyjv", "2023-08-28", "1981-05-24", 0);
+
+INSERT INTO Members (Name, Address, PhoneNo, Email, JoinedDate, ExpiryDate, IsActive) VALUES ("XtusB", "wtAMte", "iFPRhNT", "yeuFBB", "2006-01-19", "1943-02-22", 1);
+
+INSERT INTO Members (Name, Address, PhoneNo, Email, JoinedDate, ExpiryDate, IsActive) VALUES ("rdtWz", "cqqSDTp", "cxxhl", "FpVOi", "1909-04-17", "1960-10-08", 1);
+
+DROP TABLE IF EXISTS BorrowedBooks;
+
+CREATE TABLE BorrowedBooks (
+    BorrowID INT AUTO_INCREMENT,
+    MemberID INT,
+    BookID INT,
+    BorrowedDate DATE,
+    DueDate DATE,
+    ReturnedDate DATE NULL DEFAULT NULL,
+    FinePaid DECIMAL(5,2) DEFAULT 0.00,
+    PRIMARY KEY (BorrowID)
+);
+
+INSERT INTO BorrowedBooks (MemberID, BookID, BorrowedDate, DueDate, ReturnedDate, FinePaid) VALUES (157, 82, "1935-09-26", "2013-07-07", "2022-04-19", 8);
+
+INSERT INTO BorrowedBooks (MemberID, BookID, BorrowedDate, DueDate, ReturnedDate, FinePaid) VALUES (219, 195, "1980-01-24", "1909-05-23", "1907-04-04", 110);
+
+INSERT INTO BorrowedBooks (MemberID, BookID, BorrowedDate, DueDate, ReturnedDate, FinePaid) VALUES (218, 127, "2019-02-06", "2023-12-01", "1986-09-03", 146);
+
+INSERT INTO BorrowedBooks (MemberID, BookID, BorrowedDate, DueDate, ReturnedDate, FinePaid) VALUES (239, 18, "1930-04-08", "1919-03-06", "1982-03-10", 184);
+
+INSERT INTO BorrowedBooks (MemberID, BookID, BorrowedDate, DueDate, ReturnedDate, FinePaid) VALUES (231, 31, "1957-07-31", "1975-04-04", "1988-12-07", 120);
+
+INSERT INTO BorrowedBooks (MemberID, BookID, BorrowedDate, DueDate, ReturnedDate, FinePaid) VALUES (228, 25, "1945-07-11", "1997-01-29", "1995-12-12", 105);
+
+INSERT INTO BorrowedBooks (MemberID, BookID, BorrowedDate, DueDate, ReturnedDate, FinePaid) VALUES (141, 162, "1901-12-05", "2020-11-14", "1912-10-05", 121);
+
+INSERT INTO BorrowedBooks (MemberID, BookID, BorrowedDate, DueDate, ReturnedDate, FinePaid) VALUES (157, 53, "2006-07-28", "2023-01-11", "1942-11-09", 14);
+
+INSERT INTO BorrowedBooks (MemberID, BookID, BorrowedDate, DueDate, ReturnedDate, FinePaid) VALUES (39, 111, "2015-07-15", "1977-12-22", "1957-08-11", 169);
+`
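One DSN detail worth noting for the test above: testSQL defines DATE columns, and go-sql-driver/mysql only scans those into time.Time when parseTime is enabled; otherwise they come back as []byte/string. A hedged variant of the DSN built in RDBCInfo:

dsn := fmt.Sprintf("%s:%s@tcp(%s:%s)/?parseTime=true", username, password, host, port)
sqlDB, err := sql.Open("mysql", dsn)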
"1975-04-04", "1988-12-07", 120); + +INSERT INTO BorrowedBooks (MemberID, BookID, BorrowedDate, DueDate, ReturnedDate, FinePaid) VALUES (228, 25, "1945-07-11", "1997-01-29", "1995-12-12", 105); + +INSERT INTO BorrowedBooks (MemberID, BookID, BorrowedDate, DueDate, ReturnedDate, FinePaid) VALUES (141, 162, "1901-12-05", "2020-11-14", "1912-10-05", 121); + +INSERT INTO BorrowedBooks (MemberID, BookID, BorrowedDate, DueDate, ReturnedDate, FinePaid) VALUES (157, 53, "2006-07-28", "2023-01-11", "1942-11-09", 14); + +INSERT INTO BorrowedBooks (MemberID, BookID, BorrowedDate, DueDate, ReturnedDate, FinePaid) VALUES (39, 111, "2015-07-15", "1977-12-22", "1957-08-11", 169); +` From 43015ff743d24b030f439bbe11f1bae6e8fe3eec Mon Sep 17 00:00:00 2001 From: tykim96 Date: Fri, 3 Nov 2023 11:19:49 +0900 Subject: [PATCH 4/5] chore: update go.mod --- go.mod | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/go.mod b/go.mod index bef0dc2..eb18542 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,8 @@ require ( cloud.google.com/go/firestore v1.14.0 cloud.google.com/go/storage v1.34.1 github.com/aws/aws-sdk-go-v2 v1.22.1 + github.com/aws/aws-sdk-go-v2/config v1.22.0 + github.com/aws/aws-sdk-go-v2/credentials v1.15.1 github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.12.0 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.1 github.com/aws/aws-sdk-go-v2/service/dynamodb v1.25.0 @@ -23,8 +25,10 @@ require ( cloud.google.com/go/iam v1.1.3 // indirect cloud.google.com/go/longrunning v0.5.2 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.2 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.1 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.5.0 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.1 // indirect github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.17.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.0 // indirect @@ -32,6 +36,9 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.17.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.25.0 // indirect github.com/aws/smithy-go v1.16.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect From 87bf3e3006549706d10fd459e0c4bdb1abbeeb67 Mon Sep 17 00:00:00 2001 From: tykim96 Date: Fri, 3 Nov 2023 11:23:17 +0900 Subject: [PATCH 5/5] docs: Add Environments Information OS version and Go version, which are work environments, have been added. close #6 --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 22eabb7..2c4152b 100644 --- a/README.md +++ b/README.md @@ -5,3 +5,8 @@ Data Mold는 데이터 마이그레이션 기술의 검증을 위한 환경을 1. 데이터 저장소(스토리지 또는 데이터베이스)를 목표 및 소스 컴퓨팅 환경에 생성한다. 2. 생성된 소스 데이터 저장소에 테스트 데이터를 생성 및 저장한다. 3. 소스에서 목표 컴퓨팅 환경으로 데이터 복제/마이그레이션을 수행하며, 이때 데이터 전/후처리 작업을 수행한다. + +## Environments: + +* OS; Ubuntu 20.04 LTS, Windows 10 Pro +* Go: 1.21.3 \ No newline at end of file