From 91d6d20f184361f3a16f66ced6d03b3123006ec8 Mon Sep 17 00:00:00 2001
From: “hitomizzz”
Date: Wed, 10 Jul 2024 17:20:19 +0800
Subject: [PATCH] release 3.00.0

---
 README.md                                     |  16 +-
 api/pool.go                                   |  17 +
 api/utils.go                                  |   4 +-
 go.mod                                        |   2 +-
 multigoroutinetable/multi_goroutine_table.go  | 335 +++++----
 .../multi_goroutine_table_test.go             |  22 +-
 multigoroutinetable/queue.go                  | 302 +++++++-
 multigoroutinetable/writer_goroutine.go       | 255 ++++++-
 streaming/goroutine_client.go                 |   1 -
 test/connectionPool_test.go                   |  80 +++
 test/loadTable_test.go                        |  23 +-
 test/multigoroutinetable_test.go              | 673 ++++++------
 test/setup/settings.go                        |   7 -
 .../streaming/goroutineClient_reverse_test.go | 394 ++++++++++
 test/streaming/goroutineClient_test.go        | 402 +++++++++++
 .../goroutinePooledClient_reverse_test.go     | 397 ++++++++++-
 test/streaming/goroutinePooledClient_test.go  | 397 ++++++++++-
 test/streaming/pollingClient_reverse_test.go  | 502 +++++++++++++
 test/streaming/pollingClient_test.go          | 505 ++++++++++++-
 test/streaming/util.go                        | 229 +++++-
 20 files changed, 3866 insertions(+), 697 deletions(-)

diff --git a/README.md b/README.md
index 2abad14..a6d17e5 100644
--- a/README.md
+++ b/README.md
@@ -168,7 +168,7 @@ The core interface provided by Go API is `DolphinDB`, through which Go API
 
 ## 2. Installing dependencies
 
-Go API requires golang 1.15 or higher.
+Go API requires Go 1.15 or higher. Note that Go API only runs in 64-bit Go environments.
 
 Use `go get` to download and install `Go API`.
 
 ```sh
@@ -605,6 +605,7 @@ func main() {
 | GetPoolSize() | Get the number of connections |
 | Close() | Close the connection pool |
 | IsClosed() | Check whether the connection pool is closed |
+| RefreshTimeout(t time.Duration) | Reset the timeout |
 
 PoolOption parameters:
 
@@ -615,6 +616,7 @@ PoolOption parameters:
 - LoadBalanceAddresses: a string array specifying the data nodes.
 - EnableHighAvailability: whether to enable high availability.
 - HighAvailabilitySites: addresses of the high-availability nodes; use this option to specify them manually when the node addresses obtained from the server are unreachable.
+- Timeout: the timeout for the execution of each task.
 
 `Task` wraps the methods for checking the execution result of a task.
 
@@ -1035,14 +1037,18 @@ err = writer.Insert("2", time.Date(2022, time.Month(1), 1, 1, 1, 0, 0, time.UTC)
 #### GetUnwrittenData
 
 ```go
-GetUnwrittenData() [][]model.DataType
+GetUnwrittenData() [][]interface{}
 ```
 
 Description:
 
 Returns a nested list of the data that has not been written to the server.
 
-Note: after this method retrieves the data, `MultiGoroutineTable` releases these data resources.
+Note:
+
+1. The returned result is in the intermediate form stored by `MultiGoroutineTable`, so it can only be consumed by a `MultiGoroutineTable` that writes to a table with the same schema.
+
+2. After this method retrieves the data, `MultiGoroutineTable` releases these data resources.
 
 Example:
 
@@ -1053,12 +1059,12 @@ unwrittenData := writer.GetUnwrittenData()
 
 #### InsertUnwrittenData
 
 ```go
-InsertUnwrittenData(records [][]model.DataType) error
+InsertUnwrittenData(records [][]interface{}) error
 ```
 
 Description:
 
-Inserts data into the table. The return value is the same as that of the insert method. The difference from insert is that insert writes a single row at a time, while insertUnwrittenData can write multiple rows at once.
+Inserts the data obtained through GetUnwrittenData into the table. The return value is the same as that of the insert method.
 
 Parameters:
 
diff --git a/api/pool.go b/api/pool.go
index 9cd6b9c..f2e15ff 100644
--- a/api/pool.go
+++ b/api/pool.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"strings"
 	"sync"
+	"time"
 
 	"github.com/dolphindb/api-go/dialer"
 	"github.com/dolphindb/api-go/model"
@@ -19,6 +20,7 @@ type DBConnectionPool struct {
 	loadBalanceAddresses []string
 
 	connections chan dialer.Conn
+	timeout     time.Duration
 }
 
 // PoolOption helps you to configure DBConnectionPool by calling NewDBConnectionPool.
@@ -47,13 +49,24 @@ type PoolOption struct {
 
 	// addresses of load balance
 	LoadBalanceAddresses []string
+
+	// timeout of each task execution
+	Timeout time.Duration
 }
 
 // NewDBConnectionPool inits a DBConnectionPool object and configures it with opt, finally returns it.
func NewDBConnectionPool(opt *PoolOption) (*DBConnectionPool, error) { + timeout := time.Minute + if opt.Timeout != 0 { + if opt.Timeout < 0 { + return nil, errors.New("Timeout must be equal or greater than 0") + } + timeout = opt.Timeout + } p := &DBConnectionPool{ isLoadBalance: opt.LoadBalance, loadBalanceAddresses: opt.LoadBalanceAddresses, + timeout: timeout, } if opt.PoolSize < 1 { @@ -107,6 +120,9 @@ func newConn(addr string, opt *PoolOption) (dialer.Conn, error) { return conn, nil } +func (d *DBConnectionPool) RefreshTimeout(t time.Duration) { + d.timeout = t +} // Execute executes all task by connections with DBConnectionPool. func (d *DBConnectionPool) Execute(tasks []*Task) error { wg := sync.WaitGroup{} @@ -118,6 +134,7 @@ func (d *DBConnectionPool) Execute(tasks []*Task) error { wg.Add(1) go func(task *Task) { conn := <-d.connections + conn.RefreshTimeout(d.timeout) task.result, task.err = d.RunTask(conn, task) d.connections <- conn wg.Done() diff --git a/api/utils.go b/api/utils.go index a9b35f8..c4895d8 100644 --- a/api/utils.go +++ b/api/utils.go @@ -8,12 +8,12 @@ import ( ) func generateDBName() string { - u1 := uuid.Must(uuid.NewV4()) + u1 := uuid.NewV4() return fmt.Sprintf("db_%s", u1.String()[:8]) } func generateTableName() string { - u1 := uuid.Must(uuid.NewV4()) + u1 := uuid.NewV4() return fmt.Sprintf("tb_%s", u1.String()[:8]) } diff --git a/go.mod b/go.mod index 0ca31d6..6de472c 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/dolphindb/api-go go 1.15 require ( - github.com/satori/go.uuid v1.2.1-0.20181016170032-d91630c85102 + github.com/satori/go.uuid v1.1.0 github.com/smallnest/chanx v1.0.0 github.com/stretchr/testify v1.7.2 ) diff --git a/multigoroutinetable/multi_goroutine_table.go b/multigoroutinetable/multi_goroutine_table.go index f23a1e5..1a0f1cd 100644 --- a/multigoroutinetable/multi_goroutine_table.go +++ b/multigoroutinetable/multi_goroutine_table.go @@ -6,6 +6,7 @@ import ( "fmt" "strconv" "strings" + "time" "github.com/dolphindb/api-go/dialer" "github.com/dolphindb/api-go/domain" @@ -20,6 +21,7 @@ type MultiGoroutineTable struct { colNames []string colTypes []int partitionColumnIdx int32 + partitionTypeList chan model.DataTypeList partitionDomain domain.Domain goroutines []*writerGoroutine @@ -105,27 +107,26 @@ func (mtt *MultiGoroutineTable) generateMultiGoroutineTable(opt *Option) error { // Insert inserts data into the table. // The length of args must be equal with the number of columns of the table. func (mtt *MultiGoroutineTable) Insert(args ...interface{}) error { - if mtt.isExist() { - return errors.New("goroutine already exists") + if mtt.isExit() { + return errors.New("goroutine already exits") } if len(args) != len(mtt.colTypes) { return errors.New("column counts don't match") } - prow, err := mtt.getDataTypes(args...) 
+	goroutineInd, err := mtt.getGoroutineInd(args)
 	if err != nil {
+		fmt.Printf("Failed to get goroutine index: %s\n", err.Error())
 		return err
 	}
 
-	goroutineInd, err := mtt.getGoroutineInd(prow)
+	err = mtt.insertInterfaceToGoroutine(goroutineInd, args)
 	if err != nil {
-		fmt.Printf("Failed to get goroutine index: %s\n", err.Error())
+		fmt.Printf("Failed to insert interface: %s\n", err.Error())
 		return err
 	}
 
-	mtt.insertGoroutineWrite(goroutineInd, prow)
-
 	return nil
 }
 
@@ -176,7 +177,7 @@ func getDataType(dt model.DataTypeByte, v interface{}) (model.DataType, error) {
 func (mtt *MultiGoroutineTable) GetStatus() *Status {
 	s := &Status{
 		ErrMsg:              mtt.errorInfo,
-		IsExit:              mtt.isExist(),
+		IsExit:              mtt.isExit(),
 		GoroutineStatusList: make([]*GoroutineStatus, len(mtt.goroutines)),
 		GoroutineStatus:     make([]*GoroutineStatus, len(mtt.goroutines)),
 	}
@@ -196,83 +197,162 @@
 }
 
 // GetUnwrittenData returns the total of unsent data and failed data.
-func (mtt *MultiGoroutineTable) GetUnwrittenData() [][]model.DataType {
-	data := make([][]model.DataType, 0)
-loop:
+func (mtt *MultiGoroutineTable) GetUnwrittenData() [][]interface{} {
+	data := make([][]interface{}, 0)
 	for _, v := range mtt.goroutines {
-		for {
-			if val := v.failedQueue.load(); val != nil {
-				data = append(data, val)
-			} else {
-				break
-			}
+		if val := v.failedQueue.popAll(); val != nil {
+			data = append(data, val...)
 		}
-		for {
-			if val := v.writeQueue.load(); val != nil {
-				data = append(data, val)
-			} else {
-				break loop
-			}
+		if val := v.writeQueue.popAll(); val != nil {
+			data = append(data, val...)
 		}
 	}
 
 	return data
 }
 
+func (mtt *MultiGoroutineTable) mockInterface(v []interface{}) ([]interface{}, int, error) {
+	ret := make([]interface{}, 0)
+	count := -1
+	for ind, dt := range mtt.colTypes {
+		switch model.DataTypeByte(dt) {
+		case model.DtBool:
+			_, ok := v[ind].([]byte)
+			if !ok {
+				return nil, 0, fmt.Errorf("col %d of type %s expect byte slice", ind, model.GetDataTypeString(model.DataTypeByte(dt)))
+			}
+			count = len(v[ind].([]byte))
+			ret = append(ret, v[ind].([]byte)[0])
+		case model.DtBlob:
+			_, ok := v[ind].([][]byte)
+			if !ok {
+				return nil, 0, fmt.Errorf("col %d of type %s expect []byte slice", ind, model.GetDataTypeString(model.DataTypeByte(dt)))
+			}
+			count = len(v[ind].([][]byte))
+			ret = append(ret, v[ind].([][]byte)[0])
+		case model.DtChar, model.DtCompress:
+			_, ok := v[ind].([]byte)
+			if !ok {
+				return nil, 0, fmt.Errorf("col %d of type %s expect []byte slice", ind, model.GetDataTypeString(model.DataTypeByte(dt)))
+			}
+			count = len(v[ind].([]byte))
+			ret = append(ret, v[ind].([]byte)[0])
+		case model.DtComplex, model.DtPoint:
+			_, ok := v[ind].([][2]float64)
+			if !ok {
+				return nil, 0, fmt.Errorf("col %d of type %s expect [2]float64 slice", ind, model.GetDataTypeString(model.DataTypeByte(dt)))
+			}
+			count = len(v[ind].([][2]float64))
+			ret = append(ret, v[ind].([][2]float64)[0])
+		case model.DtShort:
+			_, ok := v[ind].([]int16)
+			if !ok {
+				return nil,
0, fmt.Errorf("col %d of type %s expect int16 slice", ind, model.GetDataTypeString(model.DataTypeByte(dt))) + } + count = len(v[ind].([]int16)) + ret = append(ret, v[ind].([]int16)[0]) + case model.DtInt: + _, ok := v[ind].([]int32) + if !ok { + return nil, 0, fmt.Errorf("col %d of type %s expect int32 slice", ind, model.GetDataTypeString(model.DataTypeByte(dt))) + } + count = len(v[ind].([]int32)) + ret = append(ret, v[ind].([]int32)[0]) + case model.DtLong: + _, ok := v[ind].([]int64) + if !ok { + return nil, 0, fmt.Errorf("col %d of type %s expect int64 slice", ind, model.GetDataTypeString(model.DataTypeByte(dt))) + } + count = len(v[ind].([]int64)) + ret = append(ret, v[ind].([]int64)[0]) + case model.DtFloat: + _, ok := v[ind].([]float32) + if !ok { + return nil, 0, fmt.Errorf("col %d of type %s expect float32 slice", ind, model.GetDataTypeString(model.DataTypeByte(dt))) + } + count = len(v[ind].([]float32)) + ret = append(ret, v[ind].([]float32)[0]) + case model.DtDouble: + _, ok := v[ind].([]float64) + if !ok { + return nil, 0, fmt.Errorf("col %d of type %s expect float64 slice", ind, model.GetDataTypeString(model.DataTypeByte(dt))) + } + count = len(v[ind].([]float64)) + ret = append(ret, v[ind].([]float64)[0]) + case model.DtDecimal32: + _, ok := v[ind].([]*model.Decimal32) + if !ok { + return nil, 0, fmt.Errorf("col %d of type %s expect Decimal32 slice", ind, model.GetDataTypeString(model.DataTypeByte(dt))) + } + count = len(v[ind].([]*model.Decimal32)) + ret = append(ret, v[ind].([]*model.Decimal32)[0]) + case model.DtDecimal64: + _, ok := v[ind].([]*model.Decimal64) + if !ok { + return nil, 0, fmt.Errorf("col %d of type %s expect Decimal64 slice", ind, model.GetDataTypeString(model.DataTypeByte(dt))) + } + count = len(v[ind].([]*model.Decimal64)) + ret = append(ret, v[ind].([]*model.Decimal64)[0]) + case model.DtDecimal128: + _, ok := v[ind].([]*model.Decimal128) + if !ok { + return nil, 0, fmt.Errorf("col %d of type %s expect Decimal128 slice", ind, model.GetDataTypeString(model.DataTypeByte(dt))) + } + count = len(v[ind].([]*model.Decimal128)) + ret = append(ret, v[ind].([]*model.Decimal128)[0]) + case model.DtDate, model.DtDateHour, model.DtDateMinute, model.DtDatetime, model.DtMinute, model.DtMonth, model.DtNanoTime, model.DtSecond, model.DtTime, model.DtTimestamp, model.DtNanoTimestamp: + _, ok := v[ind].([]time.Time) + if !ok { + return nil, 0, fmt.Errorf("col %d of type %s expect time.Time slice", ind, model.GetDataTypeString(model.DataTypeByte(dt))) + } + count = len(v[ind].([]time.Time)) + ret = append(ret, v[ind].([]time.Time)[0]) + case model.DtUUID, model.DtSymbol, model.DtString, model.DtDuration, model.DtInt128, model.DtIP: + _, ok := v[ind].([]string) + if !ok { + return nil, 0, fmt.Errorf("col %d of type %s expect string slice", ind, model.GetDataTypeString(model.DataTypeByte(dt))) + } + count = len(v[ind].([]string)) + ret = append(ret, v[ind].([]string)[0]) + case model.DtAny: + _, ok := v[ind].([]model.DataForm) + if !ok { + return nil, 0, fmt.Errorf("col %d of type %s expect DataForm slice", ind, model.GetDataTypeString(model.DataTypeByte(dt))) + } + count = len(v[ind].([]model.DataForm)) + ret = append(ret, v[ind].([]model.DataForm)[0]) + default: + _, ok := v[ind].([]model.DataType) + if !ok { + return nil, 0, fmt.Errorf("col %d of type %s expect DataType slice", ind, model.GetDataTypeString(model.DataTypeByte(dt))) + } + count = len(v[ind].([]model.DataType)) + ret = append(ret, v[ind].([]model.DataType)[0]) } } - - return err + return ret, count, nil } 
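From 3.00.0 on, `GetUnwrittenData` hands rows back column-batched as `[][]interface{}` (each batch holds one slice per column, e.g. `[]int32` for an INT column), and `InsertUnwrittenData` consumes exactly that shape, so unwritten rows can only be replayed into a writer targeting a table with the same schema. A minimal round-trip sketch under that assumption; the shared table `t1`, the server address, and the credentials below are placeholders, not values from this patch:

```go
// Hedged sketch of the 3.00.0 unwritten-data round-trip.
// Assumes a server at localhost:8848 on which the following was run:
//   t = table(100:0, `id`price, [INT, DOUBLE]); share t as t1
package main

import (
	"fmt"

	mtw "github.com/dolphindb/api-go/multigoroutinetable"
)

func main() {
	opt := &mtw.Option{
		GoroutineCount: 2,
		BatchSize:      10,
		Throttle:       1000,
		PartitionCol:   "id",
		Database:       "",
		TableName:      "t1",              // placeholder shared table
		Address:        "localhost:8848",  // placeholder
		UserID:         "admin",           // placeholder
		Password:       "123456",          // placeholder
	}
	writer, err := mtw.NewMultiGoroutineTable(opt)
	if err != nil {
		panic(err)
	}
	// Insert still takes one basic Go value per column (int32 for INT, float64 for DOUBLE).
	if err := writer.Insert(int32(1), 3.14); err != nil {
		panic(err)
	}
	writer.WaitForGoroutineCompletion()

	// Rows that never reached the server come back column-batched:
	// each batch is a []interface{} such as {[]int32{...}, []float64{...}}.
	if unwritten := writer.GetUnwrittenData(); len(unwritten) > 0 {
		retry, err := mtw.NewMultiGoroutineTable(opt) // must target a same-schema table
		if err != nil {
			panic(err)
		}
		if err := retry.InsertUnwrittenData(unwritten); err != nil {
			fmt.Println("retry failed:", err)
		}
		retry.WaitForGoroutineCompletion()
		fmt.Println("failed rows after retry:", retry.GetStatus().FailedRows)
	}
}
```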
-func (mtt *MultiGoroutineTable) insertNonPartitionTable(records [][]model.DataType) error {
-	vct, err := mtt.getVector(records, mtt.goroutineByColIndexForNonPartition)
-	if err != nil {
-		fmt.Printf("Failed to package vector: %s\n", err.Error())
-		return err
-	}
-
-	for k, v := range records {
-		goroutineInd := vct.HashBucket(k, len(mtt.goroutines))
-		mtt.insertGoroutineWrite(goroutineInd, v)
-	}
-
-	return nil
-}
-
-func (mtt *MultiGoroutineTable) insertPartitionTable(records [][]model.DataType) error {
-	vct, err := mtt.getVector(records, int(mtt.partitionColumnIdx))
-	if err != nil {
-		fmt.Printf("Failed to pack vector: %s\n", err.Error())
-		return err
-	}
-
-	goroutineIndexes, err := mtt.partitionDomain.GetPartitionKeys(vct)
-	if err != nil {
-		fmt.Printf("Failed to call GetPartitionKeys: %s\n", err.Error())
-		return err
+// InsertUnwrittenData inserts data into the table.
+// You can insert data obtained from GetUnwrittenData with this function.
+func (mtt *MultiGoroutineTable) InsertUnwrittenData(records [][]interface{}) error {
+	if mtt.isExit() {
+		return errors.New("goroutine already exits")
 	}
-
-	for k, v := range goroutineIndexes {
-		mtt.insertGoroutineWrite(v, records[k])
+	for _, v := range records {
+		mock, count, err := mtt.mockInterface(v)
+		if err != nil {
+			return err
+		}
+		goroutineInd, err := mtt.getGoroutineInd(mock)
+		if err != nil {
+			fmt.Printf("Failed to get goroutine index: %s\n", err.Error())
+			return err
+		}
+		wt := mtt.goroutines[goroutineInd]
+		wt.writeQueue.addBatch(v, count)
+		wt.signal.Signal()
 	}
 
 	return nil
@@ -460,6 +540,14 @@ func (mtt *MultiGoroutineTable) assignForPartitionTable(dt model.DataType, schem
 		fmt.Printf("Failed to create domain: %s\n", err.Error())
 		return err
 	}
+	chanNum := 16
+	if len(mtt.goroutines) > chanNum {
+		chanNum = len(mtt.goroutines)
+	}
+	mtt.partitionTypeList = make(chan model.DataTypeList, chanNum)
+	for i := 0; i < chanNum; i++ {
+		mtt.partitionTypeList <- model.NewEmptyDataTypeList(model.DataTypeByte(mtt.colTypes[mtt.partitionColumnIdx]), 1)
+	}
 
 	return nil
 }
@@ -527,23 +615,50 @@ func validateOption(opt *Option) error {
 	return nil
 }
 
-func (mtt *MultiGoroutineTable) getGoroutineIndForPartitionTable(prow []model.DataType) (int, error) {
+func (mtt *MultiGoroutineTable) getGoroutineIndForPartitionTable(prow []interface{}) (int, error) {
 	var goroutineInd int
 
 	s := prow[mtt.partitionColumnIdx]
 	if s != nil {
-		dtl := model.NewDataTypeList(s.DataType(), []model.DataType{s})
-		pvc := model.NewVector(dtl)
+		select {
+		case list := <-mtt.partitionTypeList:
+			defer func() {
+				mtt.partitionTypeList <- list
+			}()
+			err := list.SetWithRawData(0, s)
+			if err != nil {
+				return 0, err
+			}
+			pvc := model.NewVector(list)
+			indexes, err := mtt.partitionDomain.GetPartitionKeys(pvc)
+			if err != nil {
+				fmt.Printf("Failed to call GetPartitionKeys: %s\n", err.Error())
+				return 0, err
+			}
 
-		indexes, err := mtt.partitionDomain.GetPartitionKeys(pvc)
-		if err != nil {
-			fmt.Printf("Failed to call GetPartitionKeys: %s\n", err.Error())
-			return 0, err
-		}
+			if len(indexes) > 0 {
+				goroutineInd = indexes[0]
+			} else {
+				return 0, errors.New("failed to obtain the partition scheme")
+			}
+		default:
+			partitionValue, err := getDataType(model.DataTypeByte(mtt.colTypes[mtt.partitionColumnIdx]), s)
+			if err != nil {
+				return -1, err
+			}
+			dtl := model.NewDataTypeList(partitionValue.DataType(), []model.DataType{partitionValue})
+			pvc := model.NewVector(dtl)
 
-		if len(indexes) > 0 {
-			goroutineInd = indexes[0]
-		} else {
-			return 0,
errors.New("failed to obtain the partition scheme") + indexes, err := mtt.partitionDomain.GetPartitionKeys(pvc) + if err != nil { + fmt.Printf("Failed to call GetPartitionKeys: %s\n", err.Error()) + return 0, err + } + + if len(indexes) > 0 { + goroutineInd = indexes[0] + } else { + return 0, errors.New("failed to obtain the partition scheme") + } } } else { goroutineInd = 0 @@ -552,29 +667,34 @@ func (mtt *MultiGoroutineTable) getGoroutineIndForPartitionTable(prow []model.Da return goroutineInd, nil } -func (mtt *MultiGoroutineTable) getGoroutineIndForNonPartitionTable(prow []model.DataType) int { +func (mtt *MultiGoroutineTable) getGoroutineIndForNonPartitionTable(prow []interface{}) (int, error) { var goroutineInd int if prow[mtt.goroutineByColIndexForNonPartition] != nil { s := prow[mtt.goroutineByColIndexForNonPartition] - dtl := model.NewDataTypeList(s.DataType(), []model.DataType{s}) + partitionValue, err := getDataType(model.DataTypeByte(mtt.colTypes[mtt.goroutineByColIndexForNonPartition]), s) + if err != nil { + return -1, err + } + dtl := model.NewDataTypeList(partitionValue.DataType(), []model.DataType{partitionValue}) pvc := model.NewVector(dtl) goroutineInd = pvc.HashBucket(0, len(mtt.goroutines)) } else { goroutineInd = 0 } - return goroutineInd + return goroutineInd, nil } -func (mtt *MultiGoroutineTable) getGoroutineInd(prow []model.DataType) (int, error) { +func (mtt *MultiGoroutineTable) getGoroutineInd(prow []interface{}) (int, error) { var goroutineInd int var err error if len(mtt.goroutines) > 1 { if mtt.isPartition { goroutineInd, err = mtt.getGoroutineIndForPartitionTable(prow) } else { - goroutineInd = mtt.getGoroutineIndForNonPartitionTable(prow) + goroutineInd, err = mtt.getGoroutineIndForNonPartitionTable(prow) } + goroutineInd = goroutineInd % len(mtt.goroutines) } else { goroutineInd = 0 } @@ -582,47 +702,22 @@ func (mtt *MultiGoroutineTable) getGoroutineInd(prow []model.DataType) (int, err return goroutineInd, err } -func (mtt *MultiGoroutineTable) insertGoroutineWrite(hashKey int, prow []model.DataType) { +func (mtt *MultiGoroutineTable) insertInterfaceToGoroutine(hashKey int, prow []interface{}) error { if hashKey < 0 { hashKey = 0 } ind := hashKey % len(mtt.goroutines) wt := mtt.goroutines[ind] - wt.writeQueue.add(prow) - - wt.signal.Signal() -} - -func (mtt *MultiGoroutineTable) getVector(records [][]model.DataType, ind int) (*model.Vector, error) { - dtArr := make([]model.DataType, len(records)) - dt := model.DataTypeByte(mtt.colTypes[ind]) - for k, row := range records { - if len(row) != len(mtt.colTypes) { - return nil, errors.New("column counts don't match") - } - - if !isEqualDataTypeByte(row[ind].DataType(), dt) { - return nil, fmt.Errorf("column doesn't match. 
Expect %s, but get %s", - model.GetDataTypeString(row[ind].DataType()), model.GetDataTypeString(dt)) - } - - dtArr[k] = row[ind] + err := wt.writeQueue.add(prow) + if err != nil { + return err } - dtl := model.NewDataTypeList(dt, dtArr) - return model.NewVector(dtl), nil + wt.signal.Signal() + return nil } -func (mtt *MultiGoroutineTable) isExist() bool { +func (mtt *MultiGoroutineTable) isExit() bool { return mtt.hasError } - -func isEqualDataTypeByte(a, b model.DataTypeByte) bool { - if a == b || (a == model.DtSymbol && b == model.DtString) || - (b == model.DtSymbol && a == model.DtString) { - return true - } - - return false -} diff --git a/multigoroutinetable/multi_goroutine_table_test.go b/multigoroutinetable/multi_goroutine_table_test.go index b936d3d..fc75fc9 100644 --- a/multigoroutinetable/multi_goroutine_table_test.go +++ b/multigoroutinetable/multi_goroutine_table_test.go @@ -7,8 +7,6 @@ import ( "testing" "time" - "github.com/dolphindb/api-go/model" - "github.com/stretchr/testify/assert" ) @@ -37,18 +35,18 @@ func TestMultiGoroutineTable(t *testing.T) { df := mtt.GetUnwrittenData() assert.Equal(t, len(df), 0) - date, err := model.NewDataType(model.DtDate, time.Date(1970, 1, 1, 1, 1, 1, 1, time.UTC)) - assert.Nil(t, err) + // date, err := model.NewDataType(model.DtDate, time.Date(1970, 1, 1, 1, 1, 1, 1, time.UTC)) + // assert.Nil(t, err) - sym, err := model.NewDataType(model.DtString, "insertFailed") - assert.Nil(t, err) + // sym, err := model.NewDataType(model.DtString, "insertFailed") + // assert.Nil(t, err) - err = mtt.InsertUnwrittenData([][]model.DataType{ - { - date, sym, - }, - }) - assert.Nil(t, err) + // err = mtt.InsertUnwrittenData([][]model.DataType{ + // { + // date, sym, + // }, + // }) + // assert.Nil(t, err) mtt.WaitForGoroutineCompletion() diff --git a/multigoroutinetable/queue.go b/multigoroutinetable/queue.go index bbf4730..6ba4b44 100644 --- a/multigoroutinetable/queue.go +++ b/multigoroutinetable/queue.go @@ -1,41 +1,313 @@ package multigoroutinetable import ( + "errors" + "fmt" + "math" "sync" + "time" "github.com/dolphindb/api-go/model" ) type queue struct { - buf [][]model.DataType - l int - lock sync.RWMutex + buf [][]interface{} + bufPool chan []interface{} + l int + lock sync.RWMutex + tableWriter *MultiGoroutineTable + lastLength int } -func newQueue(size int) *queue { +func newQueue(size int, tableWriter *MultiGoroutineTable) *queue { return &queue{ - buf: make([][]model.DataType, 0, size), - lock: sync.RWMutex{}, + buf: make([][]interface{}, 0, size), + bufPool: make(chan []interface{}, 10000), + lock: sync.RWMutex{}, + tableWriter: tableWriter, } } -func (q *queue) add(in []model.DataType) { +// every interface is a slice of basic type +func (q *queue) addBatch(in []interface{}, length int) { q.lock.Lock() + defer q.lock.Unlock() q.buf = append(q.buf, in) + q.l += length +} + +func (q *queue) makeQueueBuf(colTypes []int, batchSize int) []interface{} { + select { + case buf := <-q.bufPool: + return buf + default: + break + } + queueBuf := make([]interface{}, len(colTypes)) + for k, v := range colTypes { + switch model.DataTypeByte(v) { + case model.DtBool: + queueBuf[k] = make([]byte, 0, batchSize) + case model.DtBlob: + queueBuf[k] = make([][]byte, 0, batchSize) + case model.DtChar, model.DtCompress: + queueBuf[k] = make([]byte, 0, batchSize) + case model.DtComplex, model.DtPoint: + queueBuf[k] = make([][2]float64, 0, batchSize) + case model.DtShort: + queueBuf[k] = make([]int16, 0, batchSize) + case model.DtInt: + queueBuf[k] = make([]int32, 0, 
batchSize) + case model.DtLong: + queueBuf[k] = make([]int64, 0, batchSize) + case model.DtFloat: + queueBuf[k] = make([]float32, 0, batchSize) + case model.DtDouble: + queueBuf[k] = make([]float64, 0, batchSize) + case model.DtDecimal32: + queueBuf[k] = make([]*model.Decimal32, 0, batchSize) + case model.DtDecimal64: + queueBuf[k] = make([]*model.Decimal64, 0, batchSize) + case model.DtDecimal128: + queueBuf[k] = make([]*model.Decimal128, 0, batchSize) + case model.DtDate, model.DtDateHour, model.DtDateMinute, model.DtDatetime, model.DtMinute, model.DtMonth, model.DtNanoTime, model.DtSecond, model.DtTime, model.DtTimestamp, model.DtNanoTimestamp: + queueBuf[k] = make([]time.Time, 0, batchSize) + case model.DtUUID, model.DtSymbol, model.DtString, model.DtDuration, model.DtInt128, model.DtIP: + queueBuf[k] = make([]string, 0, batchSize) + case model.DtAny: + queueBuf[k] = make([]model.DataForm, 0, batchSize) + default: + // HACK other type should be DataType + queueBuf[k] = make([]model.DataType, 0, batchSize) + } + } + return queueBuf +} + +// every interface is a basic type +func (q *queue) add(in []interface{}) error { + q.lock.Lock() + defer q.lock.Unlock() + + batch := 65535 + if q.tableWriter.batchSize > batch { + batch = q.tableWriter.batchSize + } + if len(q.buf) == 0 { + q.buf = append(q.buf, q.makeQueueBuf(q.tableWriter.colTypes, batch)) + q.lastLength = 0 + } else if q.lastLength == batch { + q.buf = append(q.buf, q.makeQueueBuf(q.tableWriter.colTypes, batch)) + q.lastLength = 0 + } + + for ind, v := range in { + dt := model.DataTypeByte(q.tableWriter.colTypes[ind]) + if dt > 128 { + // TODO don't know the usage of type greater than 128 + continue + } else if dt > 64 { + var val model.DataType + var err error + if v == nil { + dtl := model.NewEmptyDataTypeList(dt, 1) + vct := model.NewVector(dtl) + val, err = model.NewDataType(model.DtAny, vct) + if err != nil { + return err + } + } else { + dtl, err := model.NewDataTypeListFromRawData(dt, v) + if err != nil { + return err + } + + if dtl.Len() == 0 { + dtl = model.NewEmptyDataTypeList(dt, 1) + } + + vct := model.NewVector(dtl) + val, err = model.NewDataType(model.DtAny, vct) + if err != nil { + return err + } + } + q.buf[len(q.buf)-1][ind] = append(q.buf[len(q.buf)-1][ind].([]model.DataType), val) + continue + } + switch dt { + case model.DtBool: + if v == nil { + v = model.NullBool + } + var val byte + switch value := v.(type) { + case byte: + val = v.(byte) + case bool: + if value { + val = 1 + } else { + val = 0 + } + default: + return errors.New("the type of in must be bool when datatype is DtBool") + } + q.buf[len(q.buf)-1][ind] = append(q.buf[len(q.buf)-1][ind].([]byte), val) + case model.DtBlob: + if v == nil { + v = model.NullBlob + } + val, ok := v.([]byte) + if !ok { + return errors.New("the type of in must be []byte when datatype is DtBlob") + } + q.buf[len(q.buf)-1][ind] = append(q.buf[len(q.buf)-1][ind].([][]byte), val) + case model.DtChar, model.DtCompress: + if v == nil { + v = model.NullBool + } + val, ok := v.(byte) + if !ok { + return errors.New("the type of in must be byte when datatype is DtChar or DtCompress") + } + q.buf[len(q.buf)-1][ind] = append(q.buf[len(q.buf)-1][ind].([]byte), val) + case model.DtComplex, model.DtPoint: + if v == nil { + var value [2]float64 + value[0] = -math.MaxFloat64 + value[1] = -math.MaxFloat64 + v = value + } + val, ok := v.([2]float64) + if !ok { + return errors.New("the type of in must be [2]float64 when datatype is DtComplex or DtPoint") + } + q.buf[len(q.buf)-1][ind] = 
append(q.buf[len(q.buf)-1][ind].([][2]float64), val) + case model.DtShort: + if v == nil { + v = model.NullShort + } + val, ok := v.(int16) + if !ok { + return errors.New("the type of in must be int16 when datatype is DtShort") + } + q.buf[len(q.buf)-1][ind] = append(q.buf[len(q.buf)-1][ind].([]int16), val) + case model.DtInt: + if v == nil { + v = model.NullInt + } + val, ok := v.(int32) + if !ok { + return errors.New("the type of in must be int32 when datatype is DtInt") + } + q.buf[len(q.buf)-1][ind] = append(q.buf[len(q.buf)-1][ind].([]int32), val) + case model.DtLong: + if v == nil { + v = model.NullLong + } + val, ok := v.(int64) + if !ok { + return errors.New("the type of in must be int64 when datatype is DtLong") + } + q.buf[len(q.buf)-1][ind] = append(q.buf[len(q.buf)-1][ind].([]int64), val) + case model.DtFloat: + if v == nil { + v = model.NullFloat + } + val, ok := v.(float32) + if !ok { + return errors.New("the type of in must be float32 when datatype is DtFloat") + } + q.buf[len(q.buf)-1][ind] = append(q.buf[len(q.buf)-1][ind].([]float32), val) + case model.DtDouble: + if v == nil { + v = model.NullDouble + } + val, ok := v.(float64) + if !ok { + return errors.New("the type of in must be float64 when datatype is DtDouble") + } + q.buf[len(q.buf)-1][ind] = append(q.buf[len(q.buf)-1][ind].([]float64), val) + case model.DtDecimal32: + if v == nil { + value := &model.Decimal32{Scale: 6, Value: model.NullDecimal32Value} + v = value + } + val, ok := v.(*model.Decimal32) + if !ok { + return errors.New("the type of in must be *model.Decimal32 when datatype is DtDecimal32") + } + q.buf[len(q.buf)-1][ind] = append(q.buf[len(q.buf)-1][ind].([]*model.Decimal32), val) + case model.DtDecimal64: + if v == nil { + value := &model.Decimal64{Scale: 6, Value: model.NullDecimal64Value} + v = value + } + val, ok := v.(*model.Decimal64) + if !ok { + return errors.New("the type of in must be *model.Decimal64 when datatype is DtDecimal64") + } + q.buf[len(q.buf)-1][ind] = append(q.buf[len(q.buf)-1][ind].([]*model.Decimal64), val) + case model.DtDecimal128: + if v == nil { + value := &model.Decimal128{Scale: 6, Value: model.NullDecimal128Value} + v = value + } + val, ok := v.(*model.Decimal128) + if !ok { + return errors.New("the type of in must be *model.Decimal128 when datatype is DtDecimal128") + } + q.buf[len(q.buf)-1][ind] = append(q.buf[len(q.buf)-1][ind].([]*model.Decimal128), val) + case model.DtDate, model.DtDateHour, model.DtDateMinute, model.DtDatetime, model.DtMinute, model.DtMonth, model.DtNanoTime, model.DtSecond, model.DtTime, model.DtTimestamp, model.DtNanoTimestamp: + if v == nil { + v = model.NullTime + } + val, ok := v.(time.Time) + if !ok { + return errors.New("the type of in must be time.Time when datatype is " + model.GetDataTypeString(dt)) + } + q.buf[len(q.buf)-1][ind] = append(q.buf[len(q.buf)-1][ind].([]time.Time), val) + case model.DtUUID, model.DtSymbol, model.DtString, model.DtDuration, model.DtInt128, model.DtIP: + if v == nil { + v = model.NullString + } + val, ok := v.(string) + if !ok { + return errors.New("the type of in must be string when datatype is " + model.GetDataTypeString(dt)) + } + q.buf[len(q.buf)-1][ind] = append(q.buf[len(q.buf)-1][ind].([]string), val) + case model.DtAny: + if v == nil { + v = model.NullAny + } + val, ok := v.(model.DataForm) + if !ok { + return errors.New("the type of in must be model.DataForm when datatype is DtAny") + } + q.buf[len(q.buf)-1][ind] = append(q.buf[len(q.buf)-1][ind].([]model.DataForm), val) + + default: + return 
fmt.Errorf("invalid DataType %d", dt) + } + } + q.lastLength++ q.l++ - q.lock.Unlock() + return nil } -func (q *queue) load() []model.DataType { - if q.len() == 0 { +func (q *queue) popAll() [][]interface{} { + if len(q.buf) == 0 { return nil } q.lock.Lock() - res := q.buf[0] - q.buf = q.buf[1:] - q.l-- - q.lock.Unlock() - return res + defer q.lock.Unlock() + ret := make([][]interface{}, len(q.buf)) + copy(ret, q.buf) + q.buf = make([][]interface{}, 0, 32) + q.l = 0 + q.lastLength = 0 + return ret } func (q *queue) len() int { diff --git a/multigoroutinetable/writer_goroutine.go b/multigoroutinetable/writer_goroutine.go index 46ffef1..89b09b4 100644 --- a/multigoroutinetable/writer_goroutine.go +++ b/multigoroutinetable/writer_goroutine.go @@ -27,14 +27,18 @@ type writerGoroutine struct { } func newWriterGoroutine(goroutineIndex int, mtw *MultiGoroutineTable, conn dialer.Conn) *writerGoroutine { + batch := 65535 + if mtw.batchSize > batch { + batch = mtw.batchSize + } res := &writerGoroutine{ goroutineIndex: goroutineIndex, Conn: conn, tableWriter: mtw, signal: sync.NewCond(&sync.Mutex{}), exit: make(chan bool), - writeQueue: newQueue(mtw.batchSize), - failedQueue: newQueue(mtw.batchSize), + writeQueue: newQueue(batch, mtw), + failedQueue: newQueue(batch, mtw), } res.initScript() @@ -54,16 +58,16 @@ func (w *writerGoroutine) run() { w.signal.Wait() w.signal.L.Unlock() if !w.isExit() && w.tableWriter.batchSize > 1 && w.tableWriter.throttle > 0 { - if !w.isExit() && w.writeQueue.len() < w.tableWriter.batchSize { - time.Sleep(time.Duration(w.tableWriter.throttle) * time.Millisecond) + for !w.isExit() { + if w.writeQueue.len() < w.tableWriter.batchSize { + time.Sleep(time.Duration(w.tableWriter.throttle) * time.Millisecond) + } + w.writeAllData() } } - - for !w.isExit() && w.writeAllData() { - } } - for !w.tableWriter.isExist() && w.writeAllData() { + for !w.tableWriter.isExit() && w.writeAllData() { } w.isFinished = true @@ -86,12 +90,7 @@ func (w *writerGoroutine) initScript() { } func (w *writerGoroutine) writeAllData() bool { - items := make([][]model.DataType, 0) - for w.writeQueue.len() > 0 { - if val := w.writeQueue.load(); val != nil { - items = append(items, val) - } - } + items := w.writeQueue.popAll() if size := len(items); size < 1 { return false @@ -99,30 +98,34 @@ func (w *writerGoroutine) writeAllData() bool { defer w.handlePanic(items) - addRowCount := len(items) - writeTable, isWriteDone := w.generateWriteTable(items) - if isWriteDone && writeTable != nil && addRowCount > 0 { - err := w.runScript(writeTable, addRowCount) - if err != nil { - isWriteDone = false - w.handleError(err.Error()) + for _, v := range items { + isWriteDone := true + writeTable, addRowCount, newItems := w.generateWriteTableFromInterface(v) + if writeTable != nil && addRowCount > 0 { + err := w.runScript(writeTable, addRowCount) + if err != nil { + isWriteDone = false + w.handleError(err.Error()) + } + } + select { + case w.writeQueue.bufPool <- newItems: + default: } - } - if !isWriteDone { - for _, v := range items { - w.failedQueue.add(v) + if !isWriteDone { + w.failedQueue.addBatch(v, addRowCount) } } return true } -func (w *writerGoroutine) handlePanic(items [][]model.DataType) { +func (w *writerGoroutine) handlePanic(items [][]interface{}) { re := recover() if re != nil { for _, v := range items { - w.failedQueue.add(v) + w.failedQueue.addBatch(v, 0) // FIXME } buf := make([]byte, 4096) @@ -170,6 +173,204 @@ func (w *writerGoroutine) generateTableCols(items [][]model.DataType) []*model.V return 
colValues } +func (w *writerGoroutine) generateWriteTableFromInterface(items []interface{}) (*model.Table, int, []interface{}) { + count := 0 + // for column + colValues := make([]*model.Vector, len(w.tableWriter.colTypes)) + newItems := make([]interface{}, len(items)) + for ind, dtValue := range w.tableWriter.colTypes { + var vct *model.Vector + var dtl model.DataTypeList + var err error + dt := model.DataTypeByte(dtValue) + switch { + case dt >= 128: + //FIXME + dtl := model.NewEmptyDataTypeList(model.DataTypeByte(dt-128), len(items)) + vct = model.NewVector(dtl) + case dt >= 64: + vl := make([]*model.Vector, 0) + vec := items[ind].([]model.DataType) + + for i := 0; i < len(vec); i++ { + item := vec[i].Value().(*model.Vector) + vl = append(vl, item) + } + av := model.NewArrayVector(vl) + vct = model.NewVectorWithArrayVector(av) + count = len(vec) + newItems[ind] = vec[:0] + case dt == model.DtBool: + vec := items[ind].([]byte) + count = len(vec) + dtl, err = model.NewDataTypeListFromRawData(dt, vec) + if err != nil { + w.handleError(err.Error()) + return nil, -1, nil + } + vct = model.NewVector(dtl) + newItems[ind] = vec[:0] + case dt == model.DtBlob: + vec := items[ind].([][]byte) + count = len(vec) + dtl, err = model.NewDataTypeListFromRawData(dt, vec) + if err != nil { + w.handleError(err.Error()) + return nil, -1, nil + } + vct = model.NewVector(dtl) + newItems[ind] = vec[:0] + case dt == model.DtChar || dt == model.DtCompress: + vec := items[ind].([]byte) + count = len(vec) + dtl, err = model.NewDataTypeListFromRawData(dt, vec) + if err != nil { + w.handleError(err.Error()) + return nil, -1, nil + } + vct = model.NewVector(dtl) + newItems[ind] = vec[:0] + case dt == model.DtComplex || dt == model.DtPoint: + vec := items[ind].([][2]float64) + count = len(vec) + dtl, err = model.NewDataTypeListFromRawData(dt, vec) + if err != nil { + w.handleError(err.Error()) + return nil, -1, nil + } + vct = model.NewVector(dtl) + newItems[ind] = vec[:0] + case (dt >= model.DtDate && dt <= model.DtNanoTimestamp) || (dt >= model.DtDateHour && dt <= model.DtDateMinute): + vec := items[ind].([]time.Time) + count = len(vec) + dtl, err = model.NewDataTypeListFromRawData(dt, vec) + if err != nil { + w.handleError(err.Error()) + return nil, -1, nil + } + vct = model.NewVector(dtl) + newItems[ind] = vec[:0] + case dt == model.DtShort: + vec := items[ind].([]int16) + count = len(vec) + dtl, err = model.NewDataTypeListFromRawData(dt, vec) + if err != nil { + w.handleError(err.Error()) + return nil, -1, nil + } + vct = model.NewVector(dtl) + newItems[ind] = vec[:0] + case dt == model.DtInt: + vec := items[ind].([]int32) + count = len(vec) + dtl, err = model.NewDataTypeListFromRawData(dt, vec) + if err != nil { + w.handleError(err.Error()) + return nil, -1, nil + } + vct = model.NewVector(dtl) + newItems[ind] = vec[:0] + case dt == model.DtLong: + vec := items[ind].([]int64) + count = len(vec) + dtl, err = model.NewDataTypeListFromRawData(dt, vec) + if err != nil { + w.handleError(err.Error()) + return nil, -1, nil + } + vct = model.NewVector(dtl) + newItems[ind] = vec[:0] + case dt == model.DtFloat: + vec := items[ind].([]float32) + count = len(vec) + dtl, err = model.NewDataTypeListFromRawData(dt, vec) + if err != nil { + w.handleError(err.Error()) + return nil, -1, nil + } + vct = model.NewVector(dtl) + newItems[ind] = vec[:0] + case dt == model.DtDouble: + vec := items[ind].([]float64) + count = len(vec) + dtl, err = model.NewDataTypeListFromRawData(dt, vec) + if err != nil { + w.handleError(err.Error()) + 
return nil, -1, nil + } + vct = model.NewVector(dtl) + newItems[ind] = vec[:0] + + case dt == model.DtDecimal32: + vec := items[ind].([]*model.Decimal32) + count = len(vec) + dtl = model.NewEmptyDataTypeList(dt, count) + for ind, v := range vec { + err := dtl.SetWithRawData(ind, v) + if err != nil { + w.handleError(err.Error()) + return nil, -1, nil + } + } + vct = model.NewVector(dtl) + newItems[ind] = vec[:0] + case dt == model.DtDecimal64: + vec := items[ind].([]*model.Decimal64) + count = len(vec) + dtl = model.NewEmptyDataTypeList(dt, count) + for ind, v := range vec { + err := dtl.SetWithRawData(ind, v) + if err != nil { + w.handleError(err.Error()) + return nil, -1, nil + } + } + vct = model.NewVector(dtl) + newItems[ind] = vec[:0] + case dt == model.DtDecimal128: + vec := items[ind].([]*model.Decimal128) + count = len(vec) + dtl = model.NewEmptyDataTypeList(dt, count) + for ind, v := range vec { + err := dtl.SetWithRawData(ind, v) + if err != nil { + w.handleError(err.Error()) + return nil, -1, nil + } + } + vct = model.NewVector(dtl) + newItems[ind] = vec[:0] + + case dt == model.DtUUID || dt == model.DtString || dt == model.DtSymbol || dt == model.DtDuration || dt == model.DtInt128 || dt == model.DtIP: + vec := items[ind].([]string) + count = len(vec) + dtl, err = model.NewDataTypeListFromRawData(dt, vec) + if err != nil { + w.handleError(err.Error()) + return nil, -1, nil + } + vct = model.NewVector(dtl) + newItems[ind] = vec[:0] + case dt == model.DtAny: + vec := items[ind].([]model.DataForm) + count = len(vec) + dtl, err = model.NewDataTypeListFromRawData(dt, vec) + if err != nil { + w.handleError(err.Error()) + return nil, -1, nil + } + vct = model.NewVector(dtl) + newItems[ind] = vec[:0] + default: + return nil, -1, nil + } + colValues[ind] = vct + } + items = nil + + return model.NewTable(w.tableWriter.colNames, colValues), count, newItems +} + func (w *writerGoroutine) generateWriteTable(items [][]model.DataType) (*model.Table, bool) { isWriteDone := true colValues := w.generateTableCols(items) diff --git a/streaming/goroutine_client.go b/streaming/goroutine_client.go index 73e8c3b..2c8f9a7 100644 --- a/streaming/goroutine_client.go +++ b/streaming/goroutine_client.go @@ -59,7 +59,6 @@ func (t *GoroutineClient) subscribe(req *SubscribeRequest) error { return err } - fmt.Println("real subscribe") queue, err := t.subscribeInternal(req) if err != nil { return err diff --git a/test/connectionPool_test.go b/test/connectionPool_test.go index 3552c03..9f7b6fb 100644 --- a/test/connectionPool_test.go +++ b/test/connectionPool_test.go @@ -1860,3 +1860,83 @@ func TestConnnectionPoolHighAvailability(t *testing.T) { // }) } + +func TestConnnectionPooltimeOut(t *testing.T) { + t.Parallel() + Convey("TestConnnectionPooltimeOut_timeoutOption", t, func() { + opt := &api.PoolOption{ + Address: setup.Address4, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 10, + // Timeout: 1 * time.Second, // use default timeout + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + defer pool.Close() + tasks := make([]*api.Task, 1) + tasks[0] = &api.Task{Script: "sleep(62000);go;1+1"} + err = pool.Execute(tasks) + So(err, ShouldBeNil) + if tasks[0].GetError() != nil { + threadErr := tasks[0].GetError().Error() + So(threadErr, ShouldContainSubstring, "timeout") + } + }) + Convey("TestConnnectionPooltimeOut_RefreshTimeout", t, func() { + opt := &api.PoolOption{ + Address: setup.Address4, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 10, + } + pool, 
err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + pool.RefreshTimeout(1 * time.Second) + + defer pool.Close() + tasks := make([]*api.Task, 10) + for i := 0; i < 10; i++ { + if i > 4 { + tasks[i] = &api.Task{Script: "sleep(2000);go;1+1"} + continue + } + tasks[i] = &api.Task{Script: "1+1"} + } + err = pool.Execute(tasks) + So(err, ShouldBeNil) + for i := 0; i < 10; i++ { + succeed := false + for { + if tasks[i].IsSuccess(){ + succeed = true + break + }else{ + time.Sleep(3 * time.Second) + break + } + } + if succeed { + re := tasks[i].GetResult() + So(re.(*model.Scalar).Value().(int32), ShouldEqual, int32(2)) + }else{ + threadErr := tasks[i].GetError().Error() + So(threadErr, ShouldContainSubstring, "timeout") + } + } + }) + + Convey("TestConnnectionPooltimeOut_exception", t, func() { + opt := &api.PoolOption{ + Address: setup.Address4, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 10, + Timeout: -100 * time.Second, + } + _, err := api.NewDBConnectionPool(opt) + So(err.Error(), ShouldContainSubstring, "Timeout must be equal or greater than 0") + }) + + +} diff --git a/test/loadTable_test.go b/test/loadTable_test.go index 14d9847..376333e 100644 --- a/test/loadTable_test.go +++ b/test/loadTable_test.go @@ -12,6 +12,7 @@ import ( ) var host9 = getRandomClusterAddress() + func TestLoadTable(t *testing.T) { t.Parallel() Convey("Test LoadTable prepare", t, func() { @@ -47,7 +48,7 @@ func TestLoadTable(t *testing.T) { So(err, ShouldBeNil) _, err = ddb.RunScript(fmt.Sprintf(`select * from %s`, "''")) So(err, ShouldNotBeNil) - So(err.Error(), ShouldEqual, `client error response. select * from "" => FROM clause must return a table.`) + So(err.Error(), ShouldContainSubstring, `RefId:S02033`) exTmp := tmp.(*model.Table) reTmp, err := LoadTable(ddb, TbName1, DfsDBPath) So(err, ShouldBeNil) @@ -449,5 +450,25 @@ func TestLoadTable(t *testing.T) { re1 := CompareTablesDataformTable(exTmp, reTmp) So(re1, ShouldBeTrue) }) + Convey("Test_LoadTable_more_than_once", func() { + dbName := "dfs://test_" + generateRandomString(5) + tbName := "test_table" + _, err := ddb.RunScript(`db = database("` + dbName + `", VALUE, 1..5); + t = table(1 2 3 as c1, rand(100.00, 3) as c2); + db.createPartitionedTable(t, '` + tbName + `', 'c1').append!(t)`) + So(err, ShouldBeNil) + l := &api.LoadTableRequest{ + Database: dbName, + TableName: tbName, + } + for i := 0; i < 1000; i++ { + lt, err := ddb.LoadTable(l) + So(err, ShouldBeNil) + s := lt.GetHandle() + res, _ := ddb.RunScript("select * from " + s) + ex, _ := ddb.RunScript(s + `=select * from loadTable("` + dbName + `", "` + tbName + `");` + s) + So(res, ShouldEqual, ex.(*model.Table)) + } + }) }) } diff --git a/test/multigoroutinetable_test.go b/test/multigoroutinetable_test.go index 9e08d65..db92a0a 100644 --- a/test/multigoroutinetable_test.go +++ b/test/multigoroutinetable_test.go @@ -134,7 +134,7 @@ func threadinsertData(mtt *mtw.MultiGoroutineTable, n int) { fmt.Println(err) break } - if i == n-1 && err == nil { + if i == n-1 { break } i++ @@ -212,7 +212,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: DBdfsPath, TableName: DfsTableName1, @@ -237,7 +237,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: DBdfsPath, TableName: DfsTableName1, @@ -262,7 +262,7 @@ func 
TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: DBdfsPath, TableName: DfsTableName1, @@ -287,7 +287,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: DBdfsPath, TableName: DfsTableName1, @@ -312,7 +312,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: "dhb", TableName: DfsTableName1, @@ -337,7 +337,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: "", TableName: DfsTableName1, @@ -362,7 +362,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: DBdfsPath, TableName: "hsb", @@ -387,7 +387,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: DBdfsPath, TableName: "", @@ -462,7 +462,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 0, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: DBdfsPath, TableName: DfsTableName1, @@ -487,7 +487,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: -1, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: DBdfsPath, TableName: DfsTableName1, @@ -512,7 +512,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 0, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: DBdfsPath, TableName: DfsTableName1, @@ -537,7 +537,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: -3, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: DBdfsPath, TableName: DfsTableName1, @@ -571,7 +571,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: DBdfsPath, TableName: DfsTableName1, @@ -598,7 +598,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "", Database: "", TableName: "shareTable", @@ -625,7 +625,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "mt", Database: DBdfsPath, TableName: DfsTableName1, @@ -650,7 +650,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "", Database: DBdfsPath, TableName: DfsTableName1, @@ -675,7 +675,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 3, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "id", Database: DBdfsPath, TableName: DfsTableName1, @@ -700,7 +700,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, 
PartitionCol: "datev", Database: DBdfsPath, TableName: DfsTableName1, @@ -728,7 +728,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "id", Database: "", TableName: "", @@ -755,7 +755,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: DBdfsPath, TableName: DfsTableName1, @@ -782,7 +782,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: DBdfsPath, TableName: DfsTableName1, @@ -809,7 +809,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: DBdfsPath, TableName: DfsTableName1, @@ -829,7 +829,7 @@ func TestMultiGoroutineTable_exception(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "id", Database: "", TableName: "t1", @@ -839,27 +839,27 @@ func TestMultiGoroutineTable_exception(t *testing.T) { } mtt, err := mtw.NewMultiGoroutineTable(opt) So(err, ShouldBeNil) - tb := make([][]model.DataType, 0) + tb := make([][]interface{}, 0) + _tb := make([]interface{}, 0) + c1 := make([]time.Time, 0) + c2 := make([]int32, 0) + c3 := make([]int32, 0) for i := 0; i < 3; i++ { - rowData := make([]model.DataType, 0) - dt1, _ := model.NewDataType(model.DtDate, time.Date(2022, time.Month(1), i, 1, 1, 0, 0, time.UTC)) - rowData = append(rowData, dt1) - dt2, _ := model.NewDataType(model.DtString, "AAOL") - rowData = append(rowData, dt2) - dt3, _ := model.NewDataType(model.DtInt, int32(16+i)) - rowData = append(rowData, dt3) - tb = append(tb, rowData) + dt1 := time.Date(2022, time.Month(1), i, 1, 1, 0, 0, time.UTC) + c1 = append(c1, dt1) + dt2 := int32(i) + c2 = append(c2, dt2) + dt3 := int32(16 + i) + c3 = append(c3, dt3) } + _tb = append(_tb, c1) + _tb = append(_tb, c2) + _tb = append(_tb, c3) + tb = append(tb, _tb) err = mtt.InsertUnwrittenData(tb) - So(err, ShouldBeNil) + So(err.Error(), ShouldContainSubstring, "col 1 of type symbol expect string slice") mtt.WaitForGoroutineCompletion() - errmsg := mtt.GetStatus().ErrMsg - So(errmsg, ShouldEqual, "failed to set DataType(date) into DataTypeList(timestamp)") - unSetRows := mtt.GetStatus().UnSentRows - FailedRows := mtt.GetStatus().FailedRows - So(FailedRows+unSetRows, ShouldEqual, 3) - IsExit := mtt.GetStatus().IsExit - So(IsExit, ShouldEqual, true) + So(mtt.GetStatus().IsExit, ShouldBeTrue) }) Convey("TestMultiGoroutineTable_insert_dfs_value_value_ex", func() { script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + @@ -908,7 +908,7 @@ func TestMultiGoroutineTable_all_data_type(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 2, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "intv", Database: "", TableName: "all_data_type", @@ -999,13 +999,13 @@ func TestMultiGoroutineTable_GoroutineCount(t *testing.T) { Convey("test_multithreadTableWriterTest_GoroutineCount", t, func() { ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), host12, setup.UserName, setup.Password) So(err, ShouldBeNil) - scriptGoroutineCount := "t = table(1000:0, `date`id`values,[TIMESTAMP,SYMBOL,INT]);share t as t1;" - _, err = ddb.RunScript(scriptGoroutineCount) + s := "t = table(1:0, 
`date`id`values,[TIMESTAMP,SYMBOL,INT]);share t as t1;" + _, err = ddb.RunScript(s) So(err, ShouldBeNil) opt := &mtw.Option{ - GoroutineCount: 2, - BatchSize: 1, - Throttle: 1, + GoroutineCount: 5, + BatchSize: 10, + Throttle: 1000, PartitionCol: "id", Database: "", TableName: "t1", @@ -1015,50 +1015,16 @@ func TestMultiGoroutineTable_GoroutineCount(t *testing.T) { } mtt, err := mtw.NewMultiGoroutineTable(opt) So(err, ShouldBeNil) - err = mtt.Insert(time.Date(2022, time.Month(1), 1, 1, 1, 0, 0, time.UTC), "AAOL", int32(45)) - So(err, ShouldBeNil) - err = mtt.Insert(time.Date(1969, time.Month(1), 1, 1, 1, 0, 0, time.UTC), "ONSL", int32(45)) - So(err, ShouldBeNil) - tb := make([][]model.DataType, 0) - for i := 0; i < 3; i++ { - rowData := make([]model.DataType, 0) - dt1, _ := model.NewDataType(model.DtTimestamp, time.Date(2022, time.Month(1), i, 1, 1, 0, 0, time.UTC)) - rowData = append(rowData, dt1) - dt2, _ := model.NewDataType(model.DtString, "AAOL") - rowData = append(rowData, dt2) - dt3, _ := model.NewDataType(model.DtInt, int32(16+i)) - rowData = append(rowData, dt3) - tb = append(tb, rowData) - } - err = mtt.InsertUnwrittenData(tb) - So(err, ShouldBeNil) - time.Sleep(3 * time.Second) - re, err := ddb.RunScript("select * from t1") - So(err, ShouldBeNil) - reTable := re.(*model.Table) - So(reTable.Rows()+mtt.GetStatus().UnSentRows+mtt.GetStatus().FailedRows, ShouldEqual, 5) - tb = make([][]model.DataType, 0) - for i := 0; i < 3; i++ { - rowData := make([]model.DataType, 0) - dt1, _ := model.NewDataType(model.DtTimestamp, time.Date(2022, time.Month(1), i, 1, 1, 0, 0, time.UTC)) - rowData = append(rowData, dt1) - dt2, _ := model.NewDataType(model.DtString, "ONSL") - rowData = append(rowData, dt2) - dt3, _ := model.NewDataType(model.DtInt, int32(16+i)) - rowData = append(rowData, dt3) - tb = append(tb, rowData) + for i := 0; i < 100; i++ { + err = mtt.Insert(time.Date(2022, time.Month(1), 1, 1, 1, 0, 0, time.UTC), "sym"+strconv.Itoa(i), int32(i)) + So(err, ShouldBeNil) } - err = mtt.InsertUnwrittenData(tb) - So(err, ShouldBeNil) mtt.WaitForGoroutineCompletion() - re, err = ddb.RunScript("select * from t1") - So(err, ShouldBeNil) - reTable = re.(*model.Table) - So(reTable.Rows(), ShouldEqual, 8) - _, err = ddb.RunScript("undef(`t1,SHARED)") - So(err, ShouldBeNil) - err = ddb.Close() - So(err, ShouldBeNil) + status := mtt.GetStatus() + So(status.FailedRows, ShouldEqual, 0) + So(status.ErrMsg, ShouldEqual, "") + So(status.IsExit, ShouldBeTrue) + So(len(status.GoroutineStatus), ShouldEqual, 5) }) } @@ -1075,7 +1041,7 @@ func TestMultiGoroutineTable_null(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "boolv", Database: "", TableName: "t1", @@ -1105,7 +1071,7 @@ func TestMultiGoroutineTable_null(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "boolv", Database: "", TableName: "t1", @@ -1140,7 +1106,7 @@ func TestMultiGoroutineTable_getStatus_write_successful(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: "", TableName: "t1", @@ -1221,7 +1187,7 @@ func TestMultiGoroutineTable_insert_bool(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "bool", Database: "", TableName: "t1", @@ -1264,7 +1230,7 @@ func TestMultiGoroutineTable_insert_byte_int32_int64_int16(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, 
BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "id", Database: "", TableName: "t1", @@ -1308,7 +1274,7 @@ func TestMultiGoroutineTable_insert_float32_float64(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "id", Database: "", TableName: "t1", @@ -1350,7 +1316,7 @@ func TestMultiGoroutineTable_streamTable_insert_timetype(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 5, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: "", TableName: "t1", @@ -1437,7 +1403,7 @@ func TestMultiGoroutineTable_memTable_insert_timetype(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 5, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: "", TableName: "t1", @@ -1528,7 +1494,7 @@ func TestMultiGoroutineTable_dfsTable_insert_timetype(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 5, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: DBdfsPath, TableName: DfsTableName1, @@ -1619,7 +1585,7 @@ func TestMultiGoroutineTable_dimensionTable_insert_timetype(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 5, - Throttle: 1, + Throttle: 1000, PartitionCol: "", Database: DBdfsPath, TableName: DfsTableName1, @@ -1706,7 +1672,7 @@ func TestMultiGoroutineTable_memTable_insert_localTime(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 5, - Throttle: 1, + Throttle: 1000, PartitionCol: "datev", Database: "", TableName: "t1", @@ -1797,7 +1763,7 @@ func TestMultiGoroutineTable_insert_dfs_part_null(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 5, - Throttle: 1, + Throttle: 1000, PartitionCol: "boolv", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt", @@ -1855,7 +1821,7 @@ func TestMultiGoroutineTable_insert_empty_arrayVector(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "intv", Database: "", TableName: "t1", @@ -1894,7 +1860,7 @@ func TestMultiGoroutineTable_insert_arrayVector_different_length(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "intv", Database: "", TableName: "t1", @@ -1939,7 +1905,7 @@ func TestMultiGoroutineTable_insert_arrayVector_char(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "intv", Database: "", TableName: "t1", @@ -1980,7 +1946,7 @@ func TestMultiGoroutineTable_insert_arrayVector_int(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "intv", Database: "", TableName: "t1", @@ -2021,7 +1987,7 @@ func TestMultiGoroutineTable_insert_arrayVector_bool(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "intv", Database: "", TableName: "t1", @@ -2062,7 +2028,7 @@ func TestMultiGoroutineTable_insert_arrayVector_long(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "intv", Database: "", TableName: "t1", @@ -2105,7 +2071,7 @@ func TestMultiGoroutineTable_insert_arrayVector_short(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "intv", Database: "", TableName: "t1", @@ -2148,7 +2114,7 @@ func TestMultiGoroutineTable_insert_arrayVector_float(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - 
Throttle: 1, + Throttle: 1000, PartitionCol: "intv", Database: "", TableName: "t1", @@ -2189,7 +2155,7 @@ func TestMultiGoroutineTable_insert_arrayVector_double(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "intv", Database: "", TableName: "t1", @@ -2231,7 +2197,7 @@ func TestMultiGoroutineTable_insert_arrayVector_date_month(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "Arr1", Database: "", TableName: "t1", @@ -2274,7 +2240,7 @@ func TestMultiGoroutineTable_insert_arrayVector_time_minute_month(t *testing.T) opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "Arr1", Database: "", TableName: "t1", @@ -2319,7 +2285,7 @@ func TestMultiGoroutineTable_insert_arrayVector_datetime_timestamp_nanotime_nano opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "Arr1", Database: "", TableName: "t1", @@ -2363,10 +2329,11 @@ func TestMultiGoroutineTable_insert_arrayVector_otherType(t *testing.T) { "share t as t1" _, err = ddb.RunScript(scriptGoroutineCount) So(err, ShouldBeNil) + opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "uuidv", Database: "", TableName: "t1", @@ -2384,6 +2351,7 @@ func TestMultiGoroutineTable_insert_arrayVector_otherType(t *testing.T) { re, err := ddb.RunScript("select * from t1;") So(err, ShouldBeNil) reTable := re.(*model.Table) + fmt.Println(reTable, reTable.Rows()) So(reTable.Rows(), ShouldEqual, 2) reArray1v := reTable.GetColumnByName("uuidv") reArray2v := reTable.GetColumnByName("int128v") @@ -2409,7 +2377,7 @@ func TestMultiGoroutineTable_insert_blob(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "intv", Database: "", TableName: "t1", @@ -2433,21 +2401,21 @@ func TestMultiGoroutineTable_insert_blob(t *testing.T) { }) } -func TestMultiGoroutineTable_insert_wrong_type(t *testing.T) { - Convey("TestMultiGoroutineTable_insert_arrayVector_otherType", t, func() { +func TestMultiGoroutineTable_insert_arrayVector_wrong_type(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_arrayVector_wrong_type", t, func() { ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), host12, setup.UserName, setup.Password) So(err, ShouldBeNil) defer ddb.Close() - scriptGoroutineCount := "t = streamTable(1000:0, `intv`doublev," + - "[INT,DOUBLE]);" + + s := "t = streamTable(1000:0, `intv`doublev," + + "[INT[],DOUBLE[]]);" + "share t as t1" - _, err = ddb.RunScript(scriptGoroutineCount) + _, err = ddb.RunScript(s) So(err, ShouldBeNil) opt := &mtw.Option{ GoroutineCount: 1, - BatchSize: 1, - Throttle: 1, - PartitionCol: "intv", + BatchSize: 10, + Throttle: 1000, + PartitionCol: "", Database: "", TableName: "t1", UserID: setup.UserName, @@ -2456,26 +2424,9 @@ func TestMultiGoroutineTable_insert_wrong_type(t *testing.T) { } mtt, err := mtw.NewMultiGoroutineTable(opt) So(err, ShouldBeNil) - tb := make([][]model.DataType, 0) - for i := 0; i < 1; i++ { - rowData := make([]model.DataType, 0) - dt1, _ := model.NewDataType(model.DtInt, int32(16+i)) - rowData = append(rowData, dt1) - dt2, _ := model.NewDataType(model.DtInt, int32(i)) - rowData = append(rowData, dt2) - tb = append(tb, rowData) - } - err = mtt.InsertUnwrittenData(tb) - So(err, ShouldBeNil) - mtt.WaitForGoroutineCompletion() - So(mtt.GetStatus().ErrMsg, ShouldContainSubstring, "failed to 
set DataType(int) into DataTypeList(double)") - _, err = ddb.RunScript("select * from t1;") - So(err, ShouldBeNil) - status := mtt.GetStatus() - So(len(tb), ShouldEqual, status.UnSentRows+status.SentRows+status.FailedRows) - unwrittenData := mtt.GetUnwrittenData() - So(unwrittenData[0][0].Value(), ShouldEqual, int32(16)) - So(unwrittenData[0][1].Value(), ShouldEqual, int32(0)) + err = mtt.Insert([]int32{1, 2, 3}, []float32{1.1, 2.2, 3.3}) + So(err, ShouldNotBeNil) + So(err.Error(), ShouldContainSubstring, "the type of input must be []float64 when datatype is DtDouble") _, err = ddb.RunScript("undef(`t1,SHARED)") So(err, ShouldBeNil) }) @@ -2494,7 +2445,7 @@ func TestMultiGoroutineTable_insert_uuid_int128_ipaddr(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 1, - Throttle: 1, + Throttle: 1000, PartitionCol: "uuidv", Database: "", TableName: "t1", @@ -2504,17 +2455,23 @@ func TestMultiGoroutineTable_insert_uuid_int128_ipaddr(t *testing.T) { } mtt, err := mtw.NewMultiGoroutineTable(opt) So(err, ShouldBeNil) - tb := make([][]model.DataType, 0) + tb := make([][]interface{}, 0) + _tb := make([]interface{}, 0) + c1 := make([]string, 0) + c2 := make([]string, 0) + c3 := make([]string, 0) for i := 0; i < 3; i++ { - rowData := make([]model.DataType, 0) - dt1, _ := model.NewDataType(model.DtUUID, "00000000-0004-e72c-0000-000000007eb1") - rowData = append(rowData, dt1) - dt2, _ := model.NewDataType(model.DtIP, "192.168.100.20") - rowData = append(rowData, dt2) - dt3, _ := model.NewDataType(model.DtInt128, "e1671797c52e15f763380b45e841ec32") - rowData = append(rowData, dt3) - tb = append(tb, rowData) - } + dt1 := "00000000-0004-e72c-0000-000000007eb1" + c1 = append(c1, dt1) + dt2 := "192.168.100.20" + c2 = append(c2, dt2) + dt3 := "e1671797c52e15f763380b45e841ec32" + c3 = append(c3, dt3) + } + _tb = append(_tb, c1) + _tb = append(_tb, c2) + _tb = append(_tb, c3) + tb = append(tb, _tb) err = mtt.InsertUnwrittenData(tb) So(err, ShouldBeNil) mtt.WaitForGoroutineCompletion() @@ -2525,7 +2482,7 @@ func TestMultiGoroutineTable_insert_uuid_int128_ipaddr(t *testing.T) { So(reTable.GetColumnByName("int128v").String(), ShouldEqual, "vector([e1671797c52e15f763380b45e841ec32, e1671797c52e15f763380b45e841ec32, e1671797c52e15f763380b45e841ec32])") So(reTable.GetColumnByName("ipaddrv").String(), ShouldEqual, "vector([192.168.100.20, 192.168.100.20, 192.168.100.20])") status := mtt.GetStatus() - So(len(tb), ShouldEqual, status.UnSentRows+status.SentRows) + So(len(c1), ShouldEqual, status.UnSentRows+status.SentRows) _, err = ddb.RunScript("undef(`t1,SHARED)") So(err, ShouldBeNil) }) @@ -2543,7 +2500,7 @@ func TestMultiGoroutineTable_keytable(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 10, - Throttle: 1, + Throttle: 1000, PartitionCol: "tradeDate", Database: "", TableName: "t1", @@ -2587,9 +2544,9 @@ func TestMultiGoroutineTable_insert_dt_multipleThreadCount(t *testing.T) { _, err = ddb.RunScript(script) So(err, ShouldBeNil) opt := &mtw.Option{ - GoroutineCount: 10, + GoroutineCount: 2, BatchSize: 10, - Throttle: 1, + Throttle: 1000, PartitionCol: "tradeDate", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt", @@ -2621,9 +2578,9 @@ func TestMultiGoroutineTable_insert_tsdb_dt_multipleThreadCount(t *testing.T) { _, err = ddb.RunScript(script) So(err, ShouldBeNil) opt := &mtw.Option{ - GoroutineCount: 10, + GoroutineCount: 2, BatchSize: 10, - Throttle: 1, + Throttle: 1000, PartitionCol: "tradeDate", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt", @@ 
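// The rewritten hunks above migrate InsertUnwrittenData from the old
// row-oriented [][]model.DataType layout to the column-oriented
// [][]interface{} layout: each outer element is one unwritten batch whose
// inner slice holds one typed Go slice per table column. A minimal sketch
// of the new calling convention, assuming a writer built over a table with
// columns (intv INT, doublev DOUBLE); the helper name is illustrative:
func insertColumnarBatchSketch(mtt *mtw.MultiGoroutineTable) error {
	// One batch: column 0 carries []int32, column 1 carries []float64.
	batch := []interface{}{
		[]int32{16, 17, 18},
		[]float64{22.9, 22.9, 22.9},
	}
	if err := mtt.InsertUnwrittenData([][]interface{}{batch}); err != nil {
		return err
	}
	// Block until all writer goroutines have flushed their queues.
	mtt.WaitForGoroutineCompletion()
	return nil
}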
-2656,7 +2613,7 @@ func TestMultiGoroutineTable_insert_dt_multipleThread_groutine(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 10, - Throttle: 1, + Throttle: 1000, PartitionCol: "tradeDate", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt", @@ -2708,7 +2665,7 @@ func TestMultiGoroutineTable_insert_dt_multipleThread_tsdb_groutine(t *testing.T opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 10, - Throttle: 1, + Throttle: 1000, PartitionCol: "tradeDate", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt", @@ -2760,7 +2717,7 @@ func TestMultiGoroutineTable_insert_dt_oneThread(t *testing.T) { opt := &mtw.Option{ GoroutineCount: 1, BatchSize: 10, - Throttle: 1, + Throttle: 1000, PartitionCol: "tradeDate", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt", @@ -4084,8 +4041,8 @@ func TestMultiGoroutineTable_insert_dfs_multiple_mutithreadTableWriter_sameTable So(err, ShouldBeNil) opt := &mtw.Option{ GoroutineCount: 2, - BatchSize: 10, - Throttle: 1, + BatchSize: 100000, + Throttle: 100, PartitionCol: "volume", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt", @@ -4097,22 +4054,30 @@ func TestMultiGoroutineTable_insert_dfs_multiple_mutithreadTableWriter_sameTable So(err, ShouldBeNil) mtt2, err := mtw.NewMultiGoroutineTable(opt) So(err, ShouldBeNil) - tb1 := make([][]model.DataType, 0) - tb2 := make([][]model.DataType, 0) + tb1 := make([][]interface{}, 0) + tb2 := make([][]interface{}, 0) + _tb1 := make([]interface{}, 0) + _tb2 := make([]interface{}, 0) + c11 := make([]int32, 0) + c12 := make([]float64, 0) + c21 := make([]int32, 0) + c22 := make([]float64, 0) for i := 0; i < 1; i++ { - rowData1 := make([]model.DataType, 0) - rowData2 := make([]model.DataType, 0) - dt1, _ := model.NewDataType(model.DtInt, int32(1)) - rowData1 = append(rowData1, dt1) - dt2, _ := model.NewDataType(model.DtDouble, float64(12.9)) - rowData1 = append(rowData1, dt2) - dt3, _ := model.NewDataType(model.DtInt, int32(2)) - rowData2 = append(rowData2, dt3) - dt4, _ := model.NewDataType(model.DtDouble, float64(22.9)) - rowData2 = append(rowData2, dt4) - tb1 = append(tb1, rowData1) - tb2 = append(tb2, rowData2) - } + dt1 := int32(1) + c11 = append(c11, dt1) + dt2 := float64(12.9) + c12 = append(c12, dt2) + dt3 := int32(2) + c21 = append(c21, dt3) + dt4 := float64(22.9) + c22 = append(c22, dt4) + } + _tb1 = append(_tb1, c11) + _tb1 = append(_tb1, c12) + _tb2 = append(_tb2, c21) + _tb2 = append(_tb2, c22) + tb1 = append(tb1, _tb1) + tb2 = append(tb2, _tb2) for i := 0; i < 10; i++ { err = mtt1.InsertUnwrittenData(tb1) AssertNil(err) @@ -4183,9 +4148,9 @@ func TestMultiGoroutineTable_insert_dfs_multiple_mutithreadTableWriter_different _, err = ddb.RunScript(script) So(err, ShouldBeNil) opt1 := &mtw.Option{ - GoroutineCount: 20, + GoroutineCount: 2, BatchSize: 10, - Throttle: 1, + Throttle: 1000, PartitionCol: "volume", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt1", @@ -4196,9 +4161,9 @@ func TestMultiGoroutineTable_insert_dfs_multiple_mutithreadTableWriter_different mtt1, err := mtw.NewMultiGoroutineTable(opt1) So(err, ShouldBeNil) opt2 := &mtw.Option{ - GoroutineCount: 10, + GoroutineCount: 2, BatchSize: 30, - Throttle: 1, + Throttle: 1000, PartitionCol: "volume", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt2", @@ -4209,9 +4174,9 @@ func TestMultiGoroutineTable_insert_dfs_multiple_mutithreadTableWriter_different mtt2, err := mtw.NewMultiGoroutineTable(opt2) So(err, ShouldBeNil) opt3 := &mtw.Option{ - 
GoroutineCount: 10, + GoroutineCount: 2, BatchSize: 100, - Throttle: 1, + Throttle: 1000, PartitionCol: "volume", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt3", @@ -4224,7 +4189,7 @@ func TestMultiGoroutineTable_insert_dfs_multiple_mutithreadTableWriter_different opt4 := &mtw.Option{ GoroutineCount: 2, BatchSize: 10, - Throttle: 1, + Throttle: 1000, PartitionCol: "volume", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt4", @@ -4234,15 +4199,19 @@ func TestMultiGoroutineTable_insert_dfs_multiple_mutithreadTableWriter_different } mtt4, err := mtw.NewMultiGoroutineTable(opt4) So(err, ShouldBeNil) - tb := make([][]model.DataType, 0) + tb := make([][]interface{}, 0) + _tb := make([]interface{}, 0) + c1 := make([]int32, 0) + c2 := make([]float64, 0) for i := 0; i < 1; i++ { - rowData := make([]model.DataType, 0) - dt1, _ := model.NewDataType(model.DtInt, int32(16+i)) - rowData = append(rowData, dt1) - dt2, _ := model.NewDataType(model.DtDouble, float64(22.9)) - rowData = append(rowData, dt2) - tb = append(tb, rowData) - } + dt1 := int32(16 + i) + c1 = append(c1, dt1) + dt2 := float64(22.9) + c2 = append(c2, dt2) + } + _tb = append(_tb, c1) + _tb = append(_tb, c2) + tb = append(tb, _tb) for i := 0; i < 10; i++ { err = mtt1.InsertUnwrittenData(tb) AssertNil(err) @@ -4298,234 +4267,6 @@ func TestMultiGoroutineTable_insert_dfs_multiple_mutithreadTableWriter_different So(err, ShouldBeNil) }) } - -func TestMultiGoroutineTable_insert_dfs_multiple_mutithreadTableWriter_differentDatabase(t *testing.T) { - Convey("func TestMultiGoroutineTable_insert_dfs_multiple_mutithreadTableWriter_differentDatabase", t, func() { - ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), host12, setup.UserName, setup.Password) - So(err, ShouldBeNil) - defer ddb.Close() - script1 := "\n" + - "Database = \"dfs://test_MultithreadedTableWriter1\"\n" + - "if(exists(Database)){\n" + - "\tdropDatabase(Database)\t\n" + - "}\n" + - "db=database(Database, VALUE, 1..5)\n" + - "t=table(1:0, `volume`valueTrade, [INT, DOUBLE])\n;share t as t1;" + - "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt1, partitionColumns=[\"volume\"]);\n" - _, err = ddb.RunScript(script1) - So(err, ShouldBeNil) - script2 := "\n" + - "Database = \"dfs://test_MultithreadedTableWriter2\"\n" + - "if(exists(Database)){\n" + - "\tdropDatabase(Database)\t\n" + - "}\n" + - "db=database(Database, VALUE, 1..5)\n" + - "t=table(1:0, `volume`valueTrade, [INT, DOUBLE])\n;share t as t1;" + - "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt1, partitionColumns=[\"volume\"]);\n" - _, err = ddb.RunScript(script2) - So(err, ShouldBeNil) - script3 := "\n" + - "Database = \"dfs://test_MultithreadedTableWriter3\"\n" + - "if(exists(Database)){\n" + - "\tdropDatabase(Database)\t\n" + - "}\n" + - "db=database(Database, VALUE, 1..5)\n" + - "t=table(1:0, `volume`valueTrade, [INT, DOUBLE])\n;share t as t1;" + - "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt1, partitionColumns=[\"volume\"]);\n" - _, err = ddb.RunScript(script3) - So(err, ShouldBeNil) - opt1 := &mtw.Option{ - GoroutineCount: 20, - BatchSize: 10, - Throttle: 1, - PartitionCol: "volume", - Database: "dfs://test_MultithreadedTableWriter1", - TableName: "pt1", - UserID: setup.UserName, - Password: setup.Password, - Address: host12, - } - mtt1, err := mtw.NewMultiGoroutineTable(opt1) - So(err, ShouldBeNil) - opt2 := &mtw.Option{ - GoroutineCount: 10, - BatchSize: 30, - Throttle: 1, - PartitionCol: "volume", - Database: 
"dfs://test_MultithreadedTableWriter2", - TableName: "pt1", - UserID: setup.UserName, - Password: setup.Password, - Address: host12, - } - mtt2, err := mtw.NewMultiGoroutineTable(opt2) - So(err, ShouldBeNil) - opt3 := &mtw.Option{ - GoroutineCount: 10, - BatchSize: 100, - Throttle: 1, - PartitionCol: "volume", - Database: "dfs://test_MultithreadedTableWriter3", - TableName: "pt1", - UserID: setup.UserName, - Password: setup.Password, - Address: host12, - } - mtt3, err := mtw.NewMultiGoroutineTable(opt3) - So(err, ShouldBeNil) - tb := make([][]model.DataType, 0) - for i := 0; i < 1; i++ { - rowData := make([]model.DataType, 0) - dt1, _ := model.NewDataType(model.DtInt, int32(16+i)) - rowData = append(rowData, dt1) - dt2, _ := model.NewDataType(model.DtDouble, float64(22.9)) - rowData = append(rowData, dt2) - tb = append(tb, rowData) - } - for i := 0; i < 10; i++ { - err = mtt1.InsertUnwrittenData(tb) - AssertNil(err) - err = mtt2.InsertUnwrittenData(tb) - AssertNil(err) - err = mtt3.InsertUnwrittenData(tb) - AssertNil(err) - } - for j := 0; j < 10; j++ { - var intarr []int32 - var floatarr1 []float64 - for i := 0; i < 1; i++ { - floatarr1 = append(floatarr1, float64(22.9)) - intarr = append(intarr, int32(16)) - } - valueTrade, _ := model.NewDataTypeListFromRawData(model.DtDouble, floatarr1) - volume, _ := model.NewDataTypeListFromRawData(model.DtInt, intarr) - tmp := model.NewTable([]string{"volume", "valueTrade"}, - []*model.Vector{model.NewVector(volume), model.NewVector(valueTrade)}) - _, err = ddb.RunFunc("tableInsert{t1}", []model.DataForm{tmp}) - AssertNil(err) - } - mtt1.WaitForGoroutineCompletion() - mtt2.WaitForGoroutineCompletion() - mtt3.WaitForGoroutineCompletion() - re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter1',`pt1) order by volume,valueTrade") - So(err, ShouldBeNil) - re2, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter2',`pt1) order by volume,valueTrade") - So(err, ShouldBeNil) - re3, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter3',`pt1) order by volume,valueTrade") - So(err, ShouldBeNil) - ex, err := ddb.RunScript("select * from t1 order by volume,valueTrade") - So(err, ShouldBeNil) - reTable1 := re1.(*model.Table) - reTable2 := re2.(*model.Table) - reTable3 := re3.(*model.Table) - exTable := ex.(*model.Table) - for i := 0; i < len(reTable1.GetColumnNames()); i++ { - So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) - So(reTable2.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) - So(reTable3.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) - } - _, err = ddb.RunScript("undef(`t1, SHARED)") - So(err, ShouldBeNil) - _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter1\")") - So(err, ShouldBeNil) - _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter2\")") - So(err, ShouldBeNil) - _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter3\")") - So(err, ShouldBeNil) - }) -} - -func TestMultiGoroutineTable_insert_differentTable_status_isExiting(t *testing.T) { - Convey("func TestMultiGoroutineTable_insert_differentTable_status_isExiting", t, func() { - ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), host12, setup.UserName, setup.Password) - So(err, ShouldBeNil) - defer ddb.Close() - script := "tmp1=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL,TIMESTAMP, DOUBLE, 
DOUBLE, INT, DOUBLE])\n;share tmp1 as st1;" + - "tmp2=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL,TIMESTAMP, DOUBLE, DOUBLE, INT, DOUBLE])\n;share tmp2 as st2;" + - "tmp3=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL,TIMESTAMP, DOUBLE, DOUBLE, INT, DOUBLE])\n;share tmp3 as st3;" - _, err = ddb.RunScript(script) - So(err, ShouldBeNil) - opt1 := &mtw.Option{ - GoroutineCount: 20, - BatchSize: 10, - Throttle: 1, - PartitionCol: "volume", - Database: "", - TableName: "st1", - UserID: setup.UserName, - Password: setup.Password, - Address: host12, - } - mtt1, err := mtw.NewMultiGoroutineTable(opt1) - So(err, ShouldBeNil) - opt2 := &mtw.Option{ - GoroutineCount: 10, - BatchSize: 30, - Throttle: 1, - PartitionCol: "volume", - Database: "", - TableName: "st2", - UserID: setup.UserName, - Password: setup.Password, - Address: host12, - } - mtt2, err := mtw.NewMultiGoroutineTable(opt2) - So(err, ShouldBeNil) - opt3 := &mtw.Option{ - GoroutineCount: 10, - BatchSize: 100, - Throttle: 1, - PartitionCol: "volume", - Database: "", - TableName: "st3", - UserID: setup.UserName, - Password: setup.Password, - Address: host12, - } - mtt3, err := mtw.NewMultiGoroutineTable(opt3) - So(err, ShouldBeNil) - n := 100 - for i := 0; i < 10; i++ { - waitGroup.Add(1) - go threadinsertData(mtt1, n) - waitGroup.Add(1) - go threadinsertData(mtt2, n) - waitGroup.Add(1) - go threadinsertData(mtt3, n) - insertDataTotable(n, "st1") - insertDataTotable(n, "st2") - insertDataTotable(n, "st3") - } - waitGroup.Wait() - mtt1.WaitForGoroutineCompletion() - mtt2.WaitForGoroutineCompletion() - mtt3.WaitForGoroutineCompletion() - re1, err := ddb.RunScript("select * from tmp1 order by volume,valueTrade;") - So(err, ShouldBeNil) - re2, err := ddb.RunScript("select * from tmp2 order by volume,valueTrade;") - So(err, ShouldBeNil) - re3, err := ddb.RunScript("select * from tmp3 order by volume,valueTrade;") - So(err, ShouldBeNil) - ex, err := ddb.RunScript("select * from tmp1 order by volume,valueTrade;") - So(err, ShouldBeNil) - reTable1 := re1.(*model.Table) - reTable2 := re2.(*model.Table) - reTable3 := re3.(*model.Table) - exTable := ex.(*model.Table) - for i := 0; i < len(reTable1.GetColumnNames()); i++ { - So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) - So(reTable2.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) - So(reTable3.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) - } - _, err = ddb.RunScript("undef(`st1, SHARED)") - So(err, ShouldBeNil) - _, err = ddb.RunScript("undef(`st2, SHARED)") - So(err, ShouldBeNil) - _, err = ddb.RunScript("undef(`st3, SHARED)") - So(err, ShouldBeNil) - }) -} - func TestMultiGoroutineTable_insert_tsdb_keepDuplicates(t *testing.T) { Convey("func TestMultiGoroutineTable_insert_tsdb_keepDuplicates", t, func() { ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), host12, setup.UserName, setup.Password) @@ -4546,9 +4287,9 @@ func TestMultiGoroutineTable_insert_tsdb_keepDuplicates(t *testing.T) { _, err = ddb.RunScript(script) So(err, ShouldBeNil) opt1 := &mtw.Option{ - GoroutineCount: 20, - BatchSize: 10, - Throttle: 1, + GoroutineCount: 2, + BatchSize: 1000, + Throttle: 100, PartitionCol: "volume", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt1", @@ -4559,9 +4300,9 @@ func TestMultiGoroutineTable_insert_tsdb_keepDuplicates(t *testing.T) { mtt1, err := mtw.NewMultiGoroutineTable(opt1) So(err, ShouldBeNil) opt2 := &mtw.Option{ 
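// Alongside the Throttle retune, the patch consistently drops
// GoroutineCount from 10-20 down to 2 and raises BatchSize where
// throughput matters. With GoroutineCount > 1 the writer distributes rows
// across goroutines by PartitionCol, so the lower counts only reduce
// concurrency; the data written is unchanged.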
- GoroutineCount: 10, + GoroutineCount: 2, BatchSize: 30, - Throttle: 1, + Throttle: 1000, PartitionCol: "volume", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt2", @@ -4572,9 +4313,9 @@ func TestMultiGoroutineTable_insert_tsdb_keepDuplicates(t *testing.T) { mtt2, err := mtw.NewMultiGoroutineTable(opt2) So(err, ShouldBeNil) opt3 := &mtw.Option{ - GoroutineCount: 10, + GoroutineCount: 2, BatchSize: 100, - Throttle: 1, + Throttle: 1000, PartitionCol: "volume", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt3", @@ -4587,7 +4328,7 @@ func TestMultiGoroutineTable_insert_tsdb_keepDuplicates(t *testing.T) { opt4 := &mtw.Option{ GoroutineCount: 1, BatchSize: 100, - Throttle: 1, + Throttle: 1000, PartitionCol: "volume", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt4", @@ -4659,9 +4400,9 @@ func TestMultiGoroutineTable_insert_dfs_length_eq_1024(t *testing.T) { _, err = ddb.RunScript(script) So(err, ShouldBeNil) opt1 := &mtw.Option{ - GoroutineCount: 20, + GoroutineCount: 2, BatchSize: 10, - Throttle: 1, + Throttle: 1000, PartitionCol: "volume", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt", @@ -4712,9 +4453,9 @@ func TestMultiGoroutineTable_insert_dfs_length_eq_1048576(t *testing.T) { _, err = ddb.RunScript(script) So(err, ShouldBeNil) opt1 := &mtw.Option{ - GoroutineCount: 20, + GoroutineCount: 2, BatchSize: 10, - Throttle: 1, + Throttle: 1000, PartitionCol: "volume", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt", @@ -4765,9 +4506,9 @@ func TestMultiGoroutineTable_insert_dfs_length_eq_3000000(t *testing.T) { _, err = ddb.RunScript(script) So(err, ShouldBeNil) opt1 := &mtw.Option{ - GoroutineCount: 20, - BatchSize: 10, - Throttle: 1, + GoroutineCount: 2, + BatchSize: 10000, + Throttle: 100, PartitionCol: "volume", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt", @@ -4805,18 +4546,20 @@ func TestMultiGoroutineTable_insert_streamTable_multipleThread(t *testing.T) { Convey("func TestMultiGoroutineTable_insert_streamTable_multipleThread", t, func() { ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), host12, setup.UserName, setup.Password) So(err, ShouldBeNil) + t1 := "t1_" + generateRandomString(5) + t2 := "t2_" + generateRandomString(5) defer ddb.Close() - script := "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + - "tt=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share tt as t2;" + script := "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as " + t1 + ";" + + "tt=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share tt as " + t2 + ";" _, err = ddb.RunScript(script) So(err, ShouldBeNil) opt1 := &mtw.Option{ - GoroutineCount: 20, + GoroutineCount: 2, BatchSize: 10, Throttle: 3, PartitionCol: "volume", Database: "", - TableName: "t2", + TableName: t2, UserID: setup.UserName, Password: setup.Password, Address: host12, @@ -4827,20 +4570,20 @@ func TestMultiGoroutineTable_insert_streamTable_multipleThread(t *testing.T) { waitGroup.Add(10) for i := 0; i < 10; i++ { go threadinsertData(mtt1, n) - insertDataTotable(n, "t1") + insertDataTotable(n, t1) } waitGroup.Wait() mtt1.WaitForGoroutineCompletion() - re1, err := ddb.RunScript("select * from t2 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + re1, 
err := ddb.RunScript("select * from " + t2 + " order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") So(err, ShouldBeNil) - ex, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + ex, err := ddb.RunScript("select * from " + t1 + " order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") So(err, ShouldBeNil) reTable1 := re1.(*model.Table) exTable := ex.(*model.Table) for i := 0; i < len(reTable1.GetColumnNames()); i++ { So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) } - _, err = ddb.RunScript("undef(`t1, SHARED)") + _, err = ddb.RunScript("undef(`" + t1 + "`" + t2 + ", SHARED)") So(err, ShouldBeNil) }) } @@ -4857,9 +4600,9 @@ func TestMultiGoroutineTable_insert_streamtable_200cols(t *testing.T) { _, err = ddb.RunScript(script) So(err, ShouldBeNil) opt1 := &mtw.Option{ - GoroutineCount: 20, + GoroutineCount: 2, BatchSize: 10000, - Throttle: 1, + Throttle: 1000, PartitionCol: "sym", Database: "", TableName: "trades", @@ -4927,9 +4670,9 @@ func TestMultiGoroutineTable_insert_dfstable_200cols(t *testing.T) { _, err = ddb.RunScript(script) So(err, ShouldBeNil) opt1 := &mtw.Option{ - GoroutineCount: 20, + GoroutineCount: 2, BatchSize: 1000, - Throttle: 1, + Throttle: 1000, PartitionCol: "tradeDate", Database: "dfs://test_MultithreadedTableWriter", TableName: "pt1", @@ -4997,9 +4740,9 @@ func TestMultiGoroutineTable_concurrentWrite_getFailedData_when_unfinished_write _, err = ddb.RunScript(script) So(err, ShouldBeNil) opt1 := &mtw.Option{ - GoroutineCount: 10, + GoroutineCount: 2, BatchSize: 1000, - Throttle: 1, + Throttle: 1000, PartitionCol: "id", Database: "dfs://test_mtw_concurrentWrite_FailedData", TableName: "pt", @@ -5019,7 +4762,13 @@ func TestMultiGoroutineTable_concurrentWrite_getFailedData_when_unfinished_write re, err := ddb.RunScript("(exec count(*) from loadTable(Database, `pt) where val = 1)[0]") So(err, ShouldBeNil) reTable := re.(*model.Scalar) - So(failedData+len(UnwrittenData)+int(reTable.Value().(int32)), ShouldEqual, 10000) + unwrittenLength := 0 + for _, v := range UnwrittenData { + unwrittenLength += len(v[0].([]int32)) + // unwrittenLength += len(v[1].([]float64)) + // unwrittenLength += len(v[2].([]int32)) + } + So(failedData+unwrittenLength+int(reTable.Value().(int32)), ShouldEqual, 10000) _, err = ddb.RunScript("dropDatabase(\"dfs://test_mtw_concurrentWrite_FailedData\")") So(err, ShouldBeNil) }) @@ -5035,9 +4784,9 @@ func TestMultiGoroutineTable_insert_streamTable_eq_1024(t *testing.T) { _, err = ddb.RunScript(script) So(err, ShouldBeNil) opt1 := &mtw.Option{ - GoroutineCount: 20, - BatchSize: 10, - Throttle: 1, + GoroutineCount: 2, + BatchSize: 100000, + Throttle: 1000, PartitionCol: "volume", Database: "", TableName: "t2", @@ -5079,9 +4828,9 @@ func TestMultiGoroutineTable_insert_streamTable_eq_1048576(t *testing.T) { _, err = ddb.RunScript(script) So(err, ShouldBeNil) opt1 := &mtw.Option{ - GoroutineCount: 20, - BatchSize: 10, - Throttle: 1, + GoroutineCount: 2, + BatchSize: 100000, + Throttle: 1000, PartitionCol: "volume", Database: "", TableName: "t2", @@ -5125,9 +4874,9 @@ func TestMultiGoroutineTable_insert_streamTable_eq_3000000(t *testing.T) { _, err = ddb.RunScript(script) So(err, ShouldBeNil) opt1 := &mtw.Option{ - GoroutineCount: 20, - BatchSize: 10, - Throttle: 1, + GoroutineCount: 2, + BatchSize: 100000, + Throttle: 1000, PartitionCol: "volume", Database: "", TableName: "t2", diff --git a/test/setup/settings.go b/test/setup/settings.go index 
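// With the columnar layout, GetUnwrittenData likewise returns
// [][]interface{} -- one entry per unflushed batch -- so counting leftover
// rows means taking the length of any one column in each batch, exactly as
// the concurrent-write test above does. A minimal sketch, assuming the
// first column of every batch is []int32 as in that test; the helper name
// is illustrative:
func countUnwrittenRowsSketch(mtt *mtw.MultiGoroutineTable) int {
	rows := 0
	for _, batch := range mtt.GetUnwrittenData() {
		// All columns of a batch share one length, so column 0 suffices.
		rows += len(batch[0].([]int32))
	}
	return rows
}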
1a28514..07c513e 100644 --- a/test/setup/settings.go +++ b/test/setup/settings.go @@ -4,15 +4,8 @@ import ( "math/rand" "strconv" "time" - - "github.com/dolphindb/api-go/model" ) -type Tuple struct { - Dt model.DataTypeByte - VecVal string -} - func getPort(ports []int) (int, []int) { rand.Seed(time.Now().UnixNano()) randomIndex := rand.Intn(len(ports)) diff --git a/test/streaming/goroutineClient_reverse_test.go b/test/streaming/goroutineClient_reverse_test.go index bbdb3cd..489b870 100644 --- a/test/streaming/goroutineClient_reverse_test.go +++ b/test/streaming/goroutineClient_reverse_test.go @@ -855,3 +855,397 @@ func TestGoroutineClient_unsubscribe_in_doEvent_r(t *testing.T) { gc_r.Close() assert.True(t, gc_r.IsClosed()) } + +func TestGoroutineClient_subscribe_allTypes_r(t *testing.T) { + var gc_r = streaming.NewGoroutineClient(setup.IP, setup.Reverse_subPort) + testDatas := []Tuple{ + {model.DtBool, "rand(true false, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"}, + {model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"}, + {model.DtShort, "rand(32767h, 2)"}, {model.DtShort, "array(SHORT, 2,2,NULL)"}, + {model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 2,2,NULL)"}, + {model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"}, + {model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"}, + {model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, "array(MONTH, 2,2,NULL)"}, + {model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"}, + {model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"}, + {model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"}, + {model.DtDatetime, "rand(2019.01.01 12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"}, + {model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"}, + {model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"}, + {model.DtNanoTimestamp, "rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"}, + {model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"}, + {model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, "array(FLOAT, 2,2,NULL)"}, + {model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"}, + {model.DtIP, "take(ipaddr('192.168.1.1'), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"}, + {model.DtUUID, "take(uuid('12345678-1234-1234-1234-123456789012'), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"}, + {model.DtInt128, "take(int128(`e1671797c52e15f763380b45e841ec32), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"}, + {model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, {model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtString, "rand(`AAPL`MSFT`OPPO, 2)"}, {model.DtString, "array(STRING, 2,2,NULL)"}, + {model.DtSymbol, "take(`AAPL`MSFT, 2)"}, {model.DtSymbol, "array(SYMBOL, 2,2,NULL)"}, + {model.DtBlob, "take(blob(`A`B`C), 2)"}, {model.DtBlob, "array(BLOB, 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 
2), 2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"}, + } + for _, data := range testDatas { + Convey("TestGoroutineClient_subscribe_oneHandler_allTypes", t, func() { + _, err := gcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_allTypes(gcConn_r, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: gcConn_r, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host, + TableName: st, + ActionName: "test_allTypes", + Offset: 0, + Handler: &MessageHandler_allTypes{appender}, + Reconnect: true, + MsgAsTable: true, + } + + targetows := 1000 + err = gc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + rows, _ := gcConn_r.RunScript("exec count(*) from " + re) + fmt.Println("now rows:", rows.(*model.Scalar).Value()) + if int(rows.(*model.Scalar).Value().(int32)) == targetows { + break + } + } + err = gc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = gcConn_r.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gcConn_r.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + Convey("TestGoroutineClient_subscribe_batchHandler_allTypes", t, func() { + _, err := gcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_allTypes(gcConn_r, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: gcConn_r, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host, + TableName: st, + ActionName: "test_allTypes", + Offset: 0, + BatchHandler: &MessageBatchHandler_allTypes{appender}, + Reconnect: true, + } + req1.SetBatchSize(100) + targetows := 1000 + err = gc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + rows, _ := gcConn_r.RunScript("exec count(*) from " + re) + fmt.Println("now rows:", rows.(*model.Scalar).Value()) + if int(rows.(*model.Scalar).Value().(int32)) == targetows { + break + } + } + err = gc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = gcConn_r.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gcConn_r.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + + } + gc_r.Close() + assert.True(t, gc_r.IsClosed()) +} + +func TestGoroutineClient_subscribe_arrayVector_r(t *testing.T) { + var gc_r = streaming.NewGoroutineClient(setup.IP, setup.Reverse_subPort) + testDatas := []Tuple{ + {model.DtBool, "rand(true false, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"}, + {model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"}, + {model.DtShort, "rand(32767h, 2)"}, {model.DtShort, "array(SHORT, 2,2,NULL)"}, 
+ {model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 2,2,NULL)"}, + {model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"}, + {model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"}, + {model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, "array(MONTH, 2,2,NULL)"}, + {model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"}, + {model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"}, + {model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"}, + {model.DtDatetime, "rand(2019.01.01 12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"}, + {model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"}, + {model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"}, + {model.DtNanoTimestamp, "rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"}, + {model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"}, + {model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, "array(FLOAT, 2,2,NULL)"}, + {model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"}, + {model.DtIP, "take(ipaddr('192.168.1.1'), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"}, + {model.DtUUID, "take(uuid('12345678-1234-1234-1234-123456789012'), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"}, + {model.DtInt128, "take(int128(`e1671797c52e15f763380b45e841ec32), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"}, + {model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, {model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 2), 2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"}, + } + for _, data := range testDatas { + Convey("TestGoroutineClient_subscribe_oneHandler_arrayVector", t, func() { + _, err := gcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_av(gcConn_r, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: gcConn_r, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host, + TableName: st, + ActionName: "test_av", + Offset: 0, + Handler: &MessageHandler_av{appender}, + Reconnect: true, + MsgAsTable: true, + } + + targetows := 1000 + err = gc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + rows, _ := gcConn_r.RunScript("exec count(*) from " + re) + fmt.Println("now rows:", rows.(*model.Scalar).Value()) + if int(rows.(*model.Scalar).Value().(int32)) == targetows { + break + } + } + err = gc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = gcConn_r.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gcConn_r.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ 
dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + Convey("TestGoroutineClient_subscribe_batchHandler_arrayVector", t, func() { + _, err := gcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_av(gcConn_r, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: gcConn_r, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host, + TableName: st, + ActionName: "test_av", + Offset: 0, + BatchHandler: &MessageBatchHandler_av{appender}, + Reconnect: true, + } + req1.SetBatchSize(100) + targetows := 1000 + err = gc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + rows, _ := gcConn_r.RunScript("exec count(*) from " + re) + fmt.Println("now rows:", rows.(*model.Scalar).Value()) + if int(rows.(*model.Scalar).Value().(int32)) == targetows { + break + } + } + err = gc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = gcConn_r.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gcConn_r.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + + } + gc_r.Close() + assert.True(t, gc_r.IsClosed()) +} + +func TestGoroutineClient_subscribe_with_StreamDeserializer_arrayVector_r(t *testing.T) { + var gc_r = streaming.NewGoroutineClient(setup.IP, setup.Reverse_subPort) + testDatas := []Tuple{ + {model.DtBool, "rand(true false, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"}, + {model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"}, + {model.DtShort, "rand(32767h, 2)"}, {model.DtShort, "array(SHORT, 2,2,NULL)"}, + {model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 2,2,NULL)"}, + {model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"}, + {model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"}, + {model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, "array(MONTH, 2,2,NULL)"}, + {model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"}, + {model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"}, + {model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"}, + {model.DtDatetime, "rand(2019.01.01 12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"}, + {model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"}, + {model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"}, + {model.DtNanoTimestamp, "rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"}, + {model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"}, + {model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, "array(FLOAT, 2,2,NULL)"}, + {model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"}, + {model.DtIP, "take(ipaddr('192.168.1.1'), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"}, + {model.DtUUID, 
"take(uuid('12345678-1234-1234-1234-123456789012'), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"}, + {model.DtInt128, "take(int128(`e1671797c52e15f763380b45e841ec32), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"}, + {model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, {model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 2), 2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"}, + } + for _, data := range testDatas { + Convey("TestGoroutineClient_subscribe_oneHandler_with_StreamDeserializer_arrayVector", t, func() { + tbname := "outTables_" + getRandomStr(8) + _, err := gcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + sdhandler, _ := createStreamDeserializer_av(gcConn_r, tbname, data.Dt, data.VecVal) + req1 := &streaming.SubscribeRequest{ + Address: host, + TableName: tbname, + ActionName: "testStreamDeserializer", + Offset: 0, + Handler: &sdhandler, + Reconnect: true, + } + + targetows := 2000 + err = gc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + if sdhandler.msg1_total+sdhandler.msg2_total == targetows { + break + } + } + err = gc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + res_tab1 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1", "price2"}, sdhandler.res1_data) + res_tab2 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1"}, sdhandler.res2_data) + + // fmt.Println("res_tab1: ", res_tab1) + // fmt.Println("res_tab2: ", res_tab2) + // So(res_tab1.get, ShouldEqual, model.DtAny) + _, err = gcConn_r.Upload(map[string]model.DataForm{"res1": res_tab1, "res2": res_tab2}) + AssertNil(err) + _, err = gcConn_r.RunScript("res = select * from res1 order by datetimev,timestampv;ex= select * from table1 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gcConn_r.RunScript("res = select * from res2 order by datetimev,timestampv;ex= select * from table2 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + _, err = gcConn_r.RunScript( + "try{ dropStreamTable(`" + tbname + ");}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + }) + Convey("TestGoroutineClient_subscribe_batchHandler_with_StreamDeserializer_arrayVector", t, func() { + tbname := "outTables_" + getRandomStr(8) + _, err := gcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + _, sdBatchHandler := createStreamDeserializer_av(gcConn_r, tbname, data.Dt, data.VecVal) + req1 := &streaming.SubscribeRequest{ + Address: host, + TableName: tbname, + ActionName: "testStreamDeserializer", + Offset: 0, + BatchHandler: &sdBatchHandler, + Reconnect: true, + } + + req1.SetBatchSize(200) + targetows := 2000 + err = 
gc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + if sdBatchHandler.msg1_total+sdBatchHandler.msg2_total == targetows { + break + } + } + err = gc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + res_tab1 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1", "price2"}, sdBatchHandler.res1_data) + res_tab2 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1"}, sdBatchHandler.res2_data) + + // fmt.Println("res_tab1: ", res_tab1) + // fmt.Println("res_tab2: ", res_tab2) + // So(res_tab1.get, ShouldEqual, model.DtAny) + _, err = gcConn_r.Upload(map[string]model.DataForm{"res1": res_tab1, "res2": res_tab2}) + AssertNil(err) + _, err = gcConn_r.RunScript("res = select * from res1 order by datetimev,timestampv;ex= select * from table1 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gcConn_r.RunScript("res = select * from res2 order by datetimev,timestampv;ex= select * from table2 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + _, err = gcConn_r.RunScript( + "try{ dropStreamTable(`" + tbname + ");}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + }) + + } + gc_r.Close() + assert.True(t, gc_r.IsClosed()) +} diff --git a/test/streaming/goroutineClient_test.go b/test/streaming/goroutineClient_test.go index 65ab6ec..ba74dc0 100644 --- a/test/streaming/goroutineClient_test.go +++ b/test/streaming/goroutineClient_test.go @@ -845,3 +845,405 @@ func TestGoroutineClient_unsubscribe_in_doEvent(t *testing.T) { gc.Close() assert.True(t, gc.IsClosed()) } + +func TestGoroutineClient_subscribe_allTypes(t *testing.T) { + var gc = streaming.NewGoroutineClient(setup.IP, setup.SubPort) + testDatas := []Tuple{ + {model.DtBool, "rand(true false, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"}, + {model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"}, + {model.DtShort, "rand(32767h, 2)"}, {model.DtShort, "array(SHORT, 2,2,NULL)"}, + {model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 2,2,NULL)"}, + {model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"}, + {model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"}, + {model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, "array(MONTH, 2,2,NULL)"}, + {model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"}, + {model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"}, + {model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"}, + {model.DtDatetime, "rand(2019.01.01 12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"}, + {model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"}, + {model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"}, + {model.DtNanoTimestamp, "rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"}, + {model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"}, + {model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, "array(FLOAT, 2,2,NULL)"}, + {model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"}, + {model.DtIP, "rand(ipaddr(), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"}, 
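// Unlike the reverse-port variant, which pins IPADDR, UUID, and INT128 to
// fixed literals via take(...), this test draws them with rand(ipaddr(), 2),
// rand(uuid(), 2), and rand(int128(), 2). The assertions still hold because
// the received table is compared against the source table rather than
// against hard-coded values.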
+ {model.DtUUID, "rand(uuid(), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"}, + {model.DtInt128, "rand(int128(), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"}, + {model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, {model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtString, "rand(`AAPL`MSFT`OPPO, 2)"}, {model.DtString, "array(STRING, 2,2,NULL)"}, + {model.DtSymbol, "take(`AAPL`MSFT, 2)"}, {model.DtSymbol, "array(SYMBOL, 2,2,NULL)"}, + {model.DtBlob, "take(blob(`A`B`C), 2)"}, {model.DtBlob, "array(BLOB, 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 2), 2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"}, + } + for _, data := range testDatas { + Convey("TestGoroutineClient_subscribe_oneHandler_allTypes", t, func() { + if !gcConn.IsConnected() { + err := gcConn.Connect() + So(err, ShouldBeNil) + } + _, err := gcConn.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_allTypes(gcConn, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: gcConn, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host1, + TableName: st, + ActionName: "test_allTypes", + Offset: 0, + Handler: &MessageHandler_allTypes{appender}, + Reconnect: true, + MsgAsTable: true, + } + + targetows := 1000 + err = gc.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + rows, _ := gcConn.RunScript("exec count(*) from " + re) + fmt.Println("now rows:", rows.(*model.Scalar).Value()) + if int(rows.(*model.Scalar).Value().(int32)) == targetows { + break + } + } + err = gc.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = gcConn.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gcConn.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + // So(appender.Close(), ShouldBeNil) + }) + + Convey("TestGoroutineClient_subscribe_batchHandler_allTypes", t, func() { + _, err := gcConn.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_allTypes(gcConn, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: gcConn, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host1, + TableName: st, + ActionName: "test_allTypes", + Offset: 0, + BatchHandler: &MessageBatchHandler_allTypes{appender}, + Reconnect: true, + } + req1.SetBatchSize(100) + targetows := 1000 + err = gc.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + rows, _ := gcConn.RunScript("exec count(*) from " + re) + fmt.Println("now rows:", 
rows.(*model.Scalar).Value()) + if int(rows.(*model.Scalar).Value().(int32)) == targetows { + break + } + } + err = gc.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = gcConn.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gcConn.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + // So(appender.Close(), ShouldBeNil) + }) + + } + gc.Close() + assert.True(t, gc.IsClosed()) +} + +func TestGoroutineClient_subscribe_arrayVector(t *testing.T) { + var gc = streaming.NewGoroutineClient(setup.IP, setup.SubPort) + testDatas := []Tuple{ + {model.DtBool, "rand(true false, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"}, + {model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"}, + {model.DtShort, "rand(32767h, 2)"}, {model.DtShort, "array(SHORT, 2,2,NULL)"}, + {model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 2,2,NULL)"}, + {model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"}, + {model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"}, + {model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, "array(MONTH, 2,2,NULL)"}, + {model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"}, + {model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"}, + {model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"}, + {model.DtDatetime, "rand(2019.01.01 12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"}, + {model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"}, + {model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"}, + {model.DtNanoTimestamp, "rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"}, + {model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"}, + {model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, "array(FLOAT, 2,2,NULL)"}, + {model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"}, + {model.DtIP, "take(ipaddr('192.168.1.1'), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"}, + {model.DtUUID, "take(uuid('12345678-1234-1234-1234-123456789012'), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"}, + {model.DtInt128, "take(int128(`e1671797c52e15f763380b45e841ec32), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"}, + {model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, {model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 2), 2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"}, + } + for _, data := range testDatas { + Convey("TestGoroutineClient_subscribe_oneHandler_arrayVector", t, func() { + _, err := gcConn.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + 
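// The steps below are the pattern these subscription tests share:
// subscribe, poll the sink table once per second until the expected row
// count arrives, unsubscribe, then assert sink equals source via eqObj.
// Note the poll loop has no timeout, so a dropped subscription surfaces
// as a hung test rather than a failed assertion.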
st, re := CreateStreamingTableWithRandomName_av(gcConn, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: gcConn, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host1, + TableName: st, + ActionName: "test_av", + Offset: 0, + Handler: &MessageHandler_av{appender}, + Reconnect: true, + MsgAsTable: true, + } + + targetows := 1000 + err = gc.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + rows, _ := gcConn.RunScript("exec count(*) from " + re) + fmt.Println("now rows:", rows.(*model.Scalar).Value()) + if int(rows.(*model.Scalar).Value().(int32)) == targetows { + break + } + } + err = gc.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = gcConn.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gcConn.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + // So(appender.Close(), ShouldBeNil) + }) + Convey("TestGoroutineClient_subscribe_batchHandler_arrayVector", t, func() { + _, err := gcConn.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_av(gcConn, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: gcConn, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host1, + TableName: st, + ActionName: "test_av", + Offset: 0, + BatchHandler: &MessageBatchHandler_av{appender}, + Reconnect: true, + } + req1.SetBatchSize(100) + targetows := 1000 + err = gc.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + rows, _ := gcConn.RunScript("exec count(*) from " + re) + fmt.Println("now rows:", rows.(*model.Scalar).Value()) + if int(rows.(*model.Scalar).Value().(int32)) == targetows { + break + } + } + err = gc.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = gcConn.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gcConn.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + // So(appender.Close(), ShouldBeNil) + }) + + } + gc.Close() + assert.True(t, gc.IsClosed()) +} + +func TestGoroutineClient_subscribe_with_StreamDeserializer_arrayVector(t *testing.T) { + var gc = streaming.NewGoroutineClient(setup.IP, setup.SubPort) + testDatas := []Tuple{ + {model.DtBool, "rand(0 1, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"}, + {model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"}, + {model.DtShort, "rand(32767h, 2)"}, {model.DtShort, "array(SHORT, 2,2,NULL)"}, + {model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 2,2,NULL)"}, + {model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"}, + {model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"}, + {model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, 
"array(MONTH, 2,2,NULL)"}, + {model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"}, + {model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"}, + {model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"}, + {model.DtDatetime, "rand(2019.01.01 12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"}, + {model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"}, + {model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"}, + {model.DtNanoTimestamp, "rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"}, + {model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"}, + {model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, "array(FLOAT, 2,2,NULL)"}, + {model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"}, + {model.DtIP, "take(ipaddr('192.168.1.1'), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"}, + {model.DtUUID, "take(uuid('12345678-1234-1234-1234-123456789012'), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"}, + {model.DtInt128, "take(int128(`e1671797c52e15f763380b45e841ec32), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"}, + {model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, {model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 2), 2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"}, + } + for _, data := range testDatas { + Convey("TestGoroutineClient_subscribe_oneHandler_with_StreamDeserializer_arrayVector", t, func() { + tbname := "outTables_" + getRandomStr(8) + _, err := gcConn.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + sdhandler, _ := createStreamDeserializer_av(gcConn, tbname, data.Dt, data.VecVal) + req1 := &streaming.SubscribeRequest{ + Address: host1, + TableName: tbname, + ActionName: "testStreamDeserializer", + Offset: 0, + Handler: &sdhandler, + Reconnect: true, + } + + targetows := 2000 + err = gc.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + if sdhandler.msg1_total+sdhandler.msg2_total == targetows { + break + } + } + err = gc.UnSubscribe(req1) + So(err, ShouldBeNil) + + res_tab1 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1", "price2"}, sdhandler.res1_data) + res_tab2 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1"}, sdhandler.res2_data) + + // fmt.Println("res_tab1: ", res_tab1) + // fmt.Println("res_tab2: ", res_tab2) + // So(res_tab1.get, ShouldEqual, model.DtAny) + _, err = gcConn.Upload(map[string]model.DataForm{"res1": res_tab1, "res2": res_tab2}) + AssertNil(err) + _, err = gcConn.RunScript("res = select * from res1 order by datetimev,timestampv;ex= select * from table1 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gcConn.RunScript("res = select * from res2 order by 
datetimev,timestampv;ex= select * from table2 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + _, err = gcConn.RunScript( + "try{ dropStreamTable(`" + tbname + ");}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + }) + Convey("TestGoroutineClient_subscribe_batchHandler_with_StreamDeserializer_arrayVector", t, func() { + tbname := "outTables_" + getRandomStr(8) + _, err := gcConn.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + _, sdBatchHandler := createStreamDeserializer_av(gcConn, tbname, data.Dt, data.VecVal) + req1 := &streaming.SubscribeRequest{ + Address: host1, + TableName: tbname, + ActionName: "testStreamDeserializer", + Offset: 0, + BatchHandler: &sdBatchHandler, + Reconnect: true, + } + + req1.SetBatchSize(200) + targetows := 2000 + err = gc.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + if sdBatchHandler.msg1_total+sdBatchHandler.msg2_total == targetows { + break + } + } + err = gc.UnSubscribe(req1) + So(err, ShouldBeNil) + + res_tab1 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1", "price2"}, sdBatchHandler.res1_data) + res_tab2 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1"}, sdBatchHandler.res2_data) + + // fmt.Println("res_tab1: ", res_tab1) + // fmt.Println("res_tab2: ", res_tab2) + // So(res_tab1.get, ShouldEqual, model.DtAny) + _, err = gcConn.Upload(map[string]model.DataForm{"res1": res_tab1, "res2": res_tab2}) + AssertNil(err) + _, err = gcConn.RunScript("res = select * from res1 order by datetimev,timestampv;ex= select * from table1 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gcConn.RunScript("res = select * from res2 order by datetimev,timestampv;ex= select * from table2 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + _, err = gcConn.RunScript( + "try{ dropStreamTable(`" + tbname + ");}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + }) + } + gc.Close() + assert.True(t, gc.IsClosed()) +} diff --git a/test/streaming/goroutinePooledClient_reverse_test.go b/test/streaming/goroutinePooledClient_reverse_test.go index ef90657..f7c8df2 100644 --- a/test/streaming/goroutinePooledClient_reverse_test.go +++ b/test/streaming/goroutinePooledClient_reverse_test.go @@ -417,12 +417,11 @@ func TestNewGoroutinePooledClient_tableName_handler_offset_reconnect_success_r(t _, err = gpcConn_r.RunScript("stopPublishTable('" + setup.IP + "'," + strings.Split(host2, ":")[1] + ",'" + st + "')") So(err, ShouldBeNil) - time.Sleep(10 * time.Second) + waitData(gpcConn_r, receive, 2000) res, _ := gpcConn_r.RunScript("res = select * from " + receive + " order by tag;ex = select * from " + st + " order by tag;each(eqObj, ex.values(), res.values())") for _, val := range res.(*model.Vector).Data.Value() { So(val, ShouldBeTrue) } - waitData(gpcConn_r, receive, 2000) err = gpc_r.UnSubscribe(req) So(err, ShouldBeNil) ClearStreamTable(host2, st) @@ -697,3 +696,397 @@ func TestNewGoroutinePooledClient_unsubscribe_in_doEvent_r(t *testing.T) { 
gpc_r.Close() assert.True(t, gpc_r.IsClosed()) } + +func TestNewGoroutinePooledClient_subscribe_allTypes_r(t *testing.T) { + var gpc_r = streaming.NewGoroutinePooledClient(setup.IP, setup.Reverse_subPort) + testDatas := []Tuple{ + {model.DtBool, "rand(true false, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"}, + {model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"}, + {model.DtShort, "rand(32767h, 2)"}, {model.DtShort, "array(SHORT, 2,2,NULL)"}, + {model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 2,2,NULL)"}, + {model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"}, + {model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"}, + {model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, "array(MONTH, 2,2,NULL)"}, + {model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"}, + {model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"}, + {model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"}, + {model.DtDatetime, "rand(2019.01.01 12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"}, + {model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"}, + {model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"}, + {model.DtNanoTimestamp, "rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"}, + {model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"}, + {model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, "array(FLOAT, 2,2,NULL)"}, + {model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"}, + {model.DtIP, "take(ipaddr('192.168.1.1'), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"}, + {model.DtUUID, "take(uuid('12345678-1234-1234-1234-123456789012'), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"}, + {model.DtInt128, "take(int128(`e1671797c52e15f763380b45e841ec32), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"}, + {model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, {model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtString, "rand(`AAPL`MSFT`OPPO, 2)"}, {model.DtString, "array(STRING, 2,2,NULL)"}, + {model.DtSymbol, "take(`AAPL`MSFT, 2)"}, {model.DtSymbol, "array(SYMBOL, 2,2,NULL)"}, + {model.DtBlob, "take(blob(`A`B`C), 2)"}, {model.DtBlob, "array(BLOB, 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 2), 2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"}, + } + for _, data := range testDatas { + Convey("TestNewGoroutinePooledClient_subscribe_oneHandler_alltypes", t, func() { + _, err := gpcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_allTypes(gpcConn_r, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: gpcConn_r, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host2, + TableName: st, + ActionName: "test_allTypes", + Offset: 0, + Handler: 
&MessageHandler_allTypes{appender}, + Reconnect: true, + MsgAsTable: true, + } + + targetows := 1000 + err = gpc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + rows, _ := gpcConn_r.RunScript("exec count(*) from " + re) + fmt.Println("now rows:", rows.(*model.Scalar).Value()) + if int(rows.(*model.Scalar).Value().(int32)) == targetows { + break + } + } + err = gpc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = gpcConn_r.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gpcConn_r.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + Convey("TestNewGoroutinePooledClient_subscribe_batchHandler_alltypes", t, func() { + _, err := gpcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_allTypes(gpcConn_r, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: gpcConn_r, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host2, + TableName: st, + ActionName: "test_allTypes", + Offset: 0, + BatchHandler: &MessageBatchHandler_allTypes{appender}, + Reconnect: true, + } + req1.SetBatchSize(100) + targetows := 1000 + err = gpc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + rows, _ := gpcConn_r.RunScript("exec count(*) from " + re) + fmt.Println("now rows:", rows.(*model.Scalar).Value()) + if int(rows.(*model.Scalar).Value().(int32)) == targetows { + break + } + } + err = gpc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = gpcConn_r.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gpcConn_r.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + + } + gpc_r.Close() + assert.True(t, gpc_r.IsClosed()) +} + +func TestNewGoroutinePooledClient_subscribe_arrayVector_r(t *testing.T) { + var gpc_r = streaming.NewGoroutinePooledClient(setup.IP, setup.Reverse_subPort) + testDatas := []Tuple{ + {model.DtBool, "rand(true false, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"}, + {model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"}, + {model.DtShort, "rand(32767h, 2)"}, {model.DtShort, "array(SHORT, 2,2,NULL)"}, + {model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 2,2,NULL)"}, + {model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"}, + {model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"}, + {model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, "array(MONTH, 2,2,NULL)"}, + {model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"}, + {model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"}, + {model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"}, + {model.DtDatetime, "rand(2019.01.01 
12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"}, + {model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"}, + {model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"}, + {model.DtNanoTimestamp, "rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"}, + {model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"}, + {model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, "array(FLOAT, 2,2,NULL)"}, + {model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"}, + {model.DtIP, "take(ipaddr('192.168.1.1'), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"}, + {model.DtUUID, "take(uuid('12345678-1234-1234-1234-123456789012'), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"}, + {model.DtInt128, "take(int128(`e1671797c52e15f763380b45e841ec32), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"}, + {model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, {model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 2), 2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"}, + } + for _, data := range testDatas { + Convey("TestNewGoroutinePooledClient_subscribe_oneHandler_arrayVector", t, func() { + _, err := gpcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_av(gpcConn_r, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: gpcConn_r, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host2, + TableName: st, + ActionName: "test_av", + Offset: 0, + Handler: &MessageHandler_av{appender}, + Reconnect: true, + MsgAsTable: true, + } + + targetows := 1000 + err = gpc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + rows, _ := gpcConn_r.RunScript("exec count(*) from " + re) + fmt.Println("now rows:", rows.(*model.Scalar).Value()) + if int(rows.(*model.Scalar).Value().(int32)) == targetows { + break + } + } + err = gpc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = gpcConn_r.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gpcConn_r.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + Convey("TestNewGoroutinePooledClient_subscribe_batchHandler_arrayVector", t, func() { + _, err := gpcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_av(gpcConn_r, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: gpcConn_r, + } + appender := 
api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host2, + TableName: st, + ActionName: "test_av", + Offset: 0, + BatchHandler: &MessageBatchHandler_av{appender}, + Reconnect: true, + } + req1.SetBatchSize(100) + targetows := 1000 + err = gpc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + rows, _ := gpcConn_r.RunScript("exec count(*) from " + re) + fmt.Println("now rows:", rows.(*model.Scalar).Value()) + if int(rows.(*model.Scalar).Value().(int32)) == targetows { + break + } + } + err = gpc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = gpcConn_r.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gpcConn_r.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + + } + gpc_r.Close() + assert.True(t, gpc_r.IsClosed()) +} + +func TestNewGoroutinePooledClient_subscribe_with_StreamDeserializer_arrayVector_r(t *testing.T) { + var gpc_r = streaming.NewGoroutinePooledClient(setup.IP, setup.Reverse_subPort) + testDatas := []Tuple{ + {model.DtBool, "rand(0 1, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"}, + {model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"}, + {model.DtShort, "rand(32767h, 2)"}, {model.DtShort, "array(SHORT, 2,2,NULL)"}, + {model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 2,2,NULL)"}, + {model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"}, + {model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"}, + {model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, "array(MONTH, 2,2,NULL)"}, + {model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"}, + {model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"}, + {model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"}, + {model.DtDatetime, "rand(2019.01.01 12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"}, + {model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"}, + {model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"}, + {model.DtNanoTimestamp, "rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"}, + {model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"}, + {model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, "array(FLOAT, 2,2,NULL)"}, + {model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"}, + {model.DtIP, "take(ipaddr('192.168.1.1'), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"}, + {model.DtUUID, "take(uuid('12345678-1234-1234-1234-123456789012'), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"}, + {model.DtInt128, "take(int128(`e1671797c52e15f763380b45e841ec32), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"}, + {model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, {model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 
25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 2), 2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"}, + } + for _, data := range testDatas { + Convey("TestNewGoroutinePooledClient_subscribe_oneHandler_with_StreamDeserializer_arrayVector", t, func() { + tbname := "outTables_" + getRandomStr(8) + _, err := gpcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + sdhandler, _ := createStreamDeserializer_av(gpcConn_r, tbname, data.Dt, data.VecVal) + req1 := &streaming.SubscribeRequest{ + Address: host2, + TableName: tbname, + ActionName: "testStreamDeserializer", + Offset: 0, + Handler: &sdhandler, + Reconnect: true, + } + + targetows := 2000 + err = gpc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + if sdhandler.msg1_total+sdhandler.msg2_total == targetows { + break + } + } + err = gpc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + res_tab1 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1", "price2"}, sdhandler.res1_data) + res_tab2 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1"}, sdhandler.res2_data) + + // fmt.Println("res_tab1: ", res_tab1) + // fmt.Println("res_tab2: ", res_tab2) + // So(res_tab1.get, ShouldEqual, model.DtAny) + _, err = gpcConn_r.Upload(map[string]model.DataForm{"res1": res_tab1, "res2": res_tab2}) + AssertNil(err) + _, err = gpcConn_r.RunScript("res = select * from res1 order by datetimev,timestampv;ex= select * from table1 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gpcConn_r.RunScript("res = select * from res2 order by datetimev,timestampv;ex= select * from table2 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + _, err = gpcConn_r.RunScript( + "try{ dropStreamTable(`" + tbname + ");}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + }) + Convey("TestNewGoroutinePooledClient_subscribe_batchHandler_with_StreamDeserializer_arrayVector", t, func() { + tbname := "outTables_" + getRandomStr(8) + _, err := gpcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + _, sdBatchHandler := createStreamDeserializer_av(gpcConn_r, tbname, data.Dt, data.VecVal) + req1 := &streaming.SubscribeRequest{ + Address: host2, + TableName: tbname, + ActionName: "testStreamDeserializer", + Offset: 0, + BatchHandler: &sdBatchHandler, + Reconnect: true, + } + + req1.SetBatchSize(200) + targetows := 2000 + err = gpc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + if sdBatchHandler.msg1_total+sdBatchHandler.msg2_total == targetows { + break + } + } + err = gpc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + res_tab1 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1", "price2"}, sdBatchHandler.res1_data) + res_tab2 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1"}, sdBatchHandler.res2_data) + + // fmt.Println("res_tab1: ", res_tab1) + // 
fmt.Println("res_tab2: ", res_tab2) + // So(res_tab1.get, ShouldEqual, model.DtAny) + _, err = gpcConn_r.Upload(map[string]model.DataForm{"res1": res_tab1, "res2": res_tab2}) + AssertNil(err) + _, err = gpcConn_r.RunScript("res = select * from res1 order by datetimev,timestampv;ex= select * from table1 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gpcConn_r.RunScript("res = select * from res2 order by datetimev,timestampv;ex= select * from table2 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + _, err = gpcConn_r.RunScript( + "try{ dropStreamTable(`" + tbname + ");}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + }) + + } + gpc_r.Close() + assert.True(t, gpc_r.IsClosed()) +} diff --git a/test/streaming/goroutinePooledClient_test.go b/test/streaming/goroutinePooledClient_test.go index e12f87a..7c6fd30 100644 --- a/test/streaming/goroutinePooledClient_test.go +++ b/test/streaming/goroutinePooledClient_test.go @@ -461,12 +461,12 @@ func TestNewGoroutinePooledClient_tableName_handler_offseteconnect_success(t *te _, err = gpcConn.RunScript("stopPublishTable('" + setup.IP + "'," + strings.Split(host3, ":")[1] + ",'" + st + "')") So(err, ShouldBeNil) - time.Sleep(10 * time.Second) + waitData(gpcConn, receive, 2000) res, _ := gpcConn.RunScript("res = select * from " + receive + " order by tag;ex = select * from " + st + " order by tag;each(eqObj, ex.values(), res.values())") for _, val := range res.(*model.Vector).Data.Value() { So(val, ShouldBeTrue) } - waitData(gpcConn, receive, 2000) + err = gpc.UnSubscribe(req) So(err, ShouldBeNil) ClearStreamTable(host3, st) @@ -697,3 +697,396 @@ func TestNewGoroutinePooledClient_unsubscribe_in_doEvent(t *testing.T) { gpc.Close() assert.True(t, gpc.IsClosed()) } + +func TestNewGoroutinePooledClient_subscribe_allTypes(t *testing.T) { + var gpc = streaming.NewGoroutinePooledClient(setup.IP, setup.Reverse_subPort) + testDatas := []Tuple{ + {model.DtBool, "rand(true false, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"}, + {model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"}, + {model.DtShort, "rand(32767h, 2)"}, {model.DtShort, "array(SHORT, 2,2,NULL)"}, + {model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 2,2,NULL)"}, + {model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"}, + {model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"}, + {model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, "array(MONTH, 2,2,NULL)"}, + {model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"}, + {model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"}, + {model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"}, + {model.DtDatetime, "rand(2019.01.01 12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"}, + {model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"}, + {model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"}, + {model.DtNanoTimestamp, "rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"}, + {model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"}, + {model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, 
"array(FLOAT, 2,2,NULL)"}, + {model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"}, + {model.DtIP, "take(ipaddr('192.168.1.1'), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"}, + {model.DtUUID, "take(uuid('12345678-1234-1234-1234-123456789012'), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"}, + {model.DtInt128, "take(int128(`e1671797c52e15f763380b45e841ec32), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"}, + {model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, {model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtString, "rand(`AAPL`MSFT`OPPO, 2)"}, {model.DtString, "array(STRING, 2,2,NULL)"}, + {model.DtSymbol, "take(`AAPL`MSFT, 2)"}, {model.DtSymbol, "array(SYMBOL, 2,2,NULL)"}, + {model.DtBlob, "take(blob(`A`B`C), 2)"}, {model.DtBlob, "array(BLOB, 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 2), 2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"}, + } + for _, data := range testDatas { + Convey("TestNewGoroutinePooledClient_subscribe_oneHandler_alltypes", t, func() { + _, err := gpcConn.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_allTypes(gpcConn, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: gpcConn, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host2, + TableName: st, + ActionName: "test_allTypes", + Offset: 0, + Handler: &MessageHandler_allTypes{appender}, + Reconnect: true, + MsgAsTable: true, + } + + targetows := 1000 + err = gpc.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + rows, _ := gpcConn.RunScript("exec count(*) from " + re) + fmt.Println("now rows:", rows.(*model.Scalar).Value()) + if int(rows.(*model.Scalar).Value().(int32)) == targetows { + break + } + } + err = gpc.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = gpcConn.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gpcConn.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + Convey("TestNewGoroutinePooledClient_subscribe_batchHandler_alltypes", t, func() { + _, err := gpcConn.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_allTypes(gpcConn, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: gpcConn, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host2, + TableName: st, + ActionName: "test_alltypes", + Offset: 0, + BatchHandler: &MessageBatchHandler_allTypes{appender}, + Reconnect: true, + } + req1.SetBatchSize(100) + targetows := 1000 + err = gpc.Subscribe(req1) + 
So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + rows, _ := gpcConn.RunScript("exec count(*) from " + re) + fmt.Println("now rows:", rows.(*model.Scalar).Value()) + if int(rows.(*model.Scalar).Value().(int32)) == targetows { + break + } + } + err = gpc.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = gpcConn.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gpcConn.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + + } + gpc.Close() + assert.True(t, gpc.IsClosed()) +} + +func TestNewGoroutinePooledClient_subscribe_arrayVector(t *testing.T) { + var gpc = streaming.NewGoroutinePooledClient(setup.IP, setup.SubPort) + testDatas := []Tuple{ + {model.DtBool, "rand(true false, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"}, + {model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"}, + {model.DtShort, "rand(32767h, 2)"}, {model.DtShort, "array(SHORT, 2,2,NULL)"}, + {model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 2,2,NULL)"}, + {model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"}, + {model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"}, + {model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, "array(MONTH, 2,2,NULL)"}, + {model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"}, + {model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"}, + {model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"}, + {model.DtDatetime, "rand(2019.01.01 12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"}, + {model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"}, + {model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"}, + {model.DtNanoTimestamp, "rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"}, + {model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"}, + {model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, "array(FLOAT, 2,2,NULL)"}, + {model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"}, + {model.DtIP, "take(ipaddr('192.168.1.1'), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"}, + {model.DtUUID, "take(uuid('12345678-1234-1234-1234-123456789012'), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"}, + {model.DtInt128, "take(int128(`e1671797c52e15f763380b45e841ec32), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"}, + {model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, {model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 2), 2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"}, + } + for _, data := range testDatas { + 
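// each Tuple case below is verified twice: first with a per-message Handler (MsgAsTable: true), then with a BatchHandler that flushes every 100 messages +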
Convey("TestNewGoroutinePooledClient_subscribe_oneHandler_arrayVector", t, func() { + _, err := gpcConn.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_av(gpcConn, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: gpcConn, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host3, + TableName: st, + ActionName: "test_av", + Offset: 0, + Handler: &MessageHandler_av{appender}, + Reconnect: true, + MsgAsTable: true, + } + + targetows := 1000 + err = gpc.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + rows, _ := gpcConn.RunScript("exec count(*) from " + re) + fmt.Println("now rows:", rows.(*model.Scalar).Value()) + if int(rows.(*model.Scalar).Value().(int32)) == targetows { + break + } + } + err = gpc.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = gpcConn.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;share ex as t_ex; share res as tes;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gpcConn.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + Convey("TestNewGoroutinePooledClient_subscribe_batchHandler_arrayVector", t, func() { + _, err := gpcConn.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_av(gpcConn, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: gpcConn, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host3, + TableName: st, + ActionName: "test_av", + Offset: 0, + BatchHandler: &MessageBatchHandler_av{appender}, + Reconnect: true, + } + req1.SetBatchSize(100) + targetows := 1000 + err = gpc.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + rows, _ := gpcConn.RunScript("exec count(*) from " + re) + fmt.Println("now rows:", rows.(*model.Scalar).Value()) + if int(rows.(*model.Scalar).Value().(int32)) == targetows { + break + } + } + err = gpc.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = gpcConn.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gpcConn.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + + } + gpc.Close() + assert.True(t, gpc.IsClosed()) +} + +func TestNewGoroutinePooledClient_subscribe_with_StreamDeserializer_arrayVector(t *testing.T) { + var gpc = streaming.NewGoroutinePooledClient(setup.IP, setup.SubPort) + testDatas := []Tuple{ + {model.DtBool, "rand(0 1, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"}, + {model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"}, + {model.DtShort, "rand(32767h, 2)"}, {model.DtShort, "array(SHORT, 2,2,NULL)"}, + {model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 
2,2,NULL)"}, + {model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"}, + {model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"}, + {model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, "array(MONTH, 2,2,NULL)"}, + {model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"}, + {model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"}, + {model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"}, + {model.DtDatetime, "rand(2019.01.01 12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"}, + {model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"}, + {model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"}, + {model.DtNanoTimestamp, "rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"}, + {model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"}, + {model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, "array(FLOAT, 2,2,NULL)"}, + {model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"}, + {model.DtIP, "take(ipaddr('192.168.1.1'), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"}, + {model.DtUUID, "take(uuid('12345678-1234-1234-1234-123456789012'), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"}, + {model.DtInt128, "take(int128(`e1671797c52e15f763380b45e841ec32), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"}, + {model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, {model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 2), 2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"}, + } + for _, data := range testDatas { + Convey("TestNewGoroutinePooledClient_subscribe_oneHandler_with_StreamDeserializer_arrayVector", t, func() { + tbname := "outTables_" + getRandomStr(8) + _, err := gpcConn.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + sdhandler, _ := createStreamDeserializer_av(gpcConn, tbname, data.Dt, data.VecVal) + req1 := &streaming.SubscribeRequest{ + Address: host3, + TableName: tbname, + ActionName: "testStreamDeserializer", + Offset: 0, + Handler: &sdhandler, + Reconnect: true, + } + + targetows := 2000 + err = gpc.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + if sdhandler.msg1_total+sdhandler.msg2_total == targetows { + break + } + } + err = gpc.UnSubscribe(req1) + So(err, ShouldBeNil) + + res_tab1 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1", "price2"}, sdhandler.res1_data) + res_tab2 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1"}, sdhandler.res2_data) + + // fmt.Println("res_tab1: ", res_tab1) + // fmt.Println("res_tab2: ", res_tab2) + // So(res_tab1.get, ShouldEqual, model.DtAny) + _, err = gpcConn.Upload(map[string]model.DataForm{"res1": res_tab1, "res2": res_tab2}) + AssertNil(err) + _, err = 
gpcConn.RunScript("res = select * from res1 order by datetimev,timestampv;ex= select * from table1 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gpcConn.RunScript("res = select * from res2 order by datetimev,timestampv;ex= select * from table2 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + _, err = gpcConn.RunScript( + "try{ dropStreamTable(`" + tbname + ");}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + }) + Convey("TestNewGoroutinePooledClient_subscribe_batchHandler_with_StreamDeserializer_arrayVector", t, func() { + tbname := "outTables_" + getRandomStr(8) + _, err := gpcConn.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + _, sdBatchHandler := createStreamDeserializer_av(gpcConn, tbname, data.Dt, data.VecVal) + req1 := &streaming.SubscribeRequest{ + Address: host3, + TableName: tbname, + ActionName: "testStreamDeserializer", + Offset: 0, + BatchHandler: &sdBatchHandler, + Reconnect: true, + } + + req1.SetBatchSize(200) + targetows := 2000 + err = gpc.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + for { + time.Sleep(1 * time.Second) + if sdBatchHandler.msg1_total+sdBatchHandler.msg2_total == targetows { + break + } + } + err = gpc.UnSubscribe(req1) + So(err, ShouldBeNil) + + res_tab1 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1", "price2"}, sdBatchHandler.res1_data) + res_tab2 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1"}, sdBatchHandler.res2_data) + + // fmt.Println("res_tab1: ", res_tab1) + // fmt.Println("res_tab2: ", res_tab2) + // So(res_tab1.get, ShouldEqual, model.DtAny) + _, err = gpcConn.Upload(map[string]model.DataForm{"res1": res_tab1, "res2": res_tab2}) + AssertNil(err) + _, err = gpcConn.RunScript("res = select * from res1 order by datetimev,timestampv;ex= select * from table1 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = gpcConn.RunScript("res = select * from res2 order by datetimev,timestampv;ex= select * from table2 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + _, err = gpcConn.RunScript( + "try{ dropStreamTable(`" + tbname + ");}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + }) + } + gpc.Close() + assert.True(t, gpc.IsClosed()) +} diff --git a/test/streaming/pollingClient_reverse_test.go b/test/streaming/pollingClient_reverse_test.go index 77e6b07..36f9f06 100644 --- a/test/streaming/pollingClient_reverse_test.go +++ b/test/streaming/pollingClient_reverse_test.go @@ -1011,3 +1011,505 @@ func TestPollingClient_subscribe_with_StreamDeserializer_r(t *testing.T) { pc_r.Close() assert.True(t, pc_r.IsClosed()) } + +func TestNewPollingClient_subscribe_allTypes_r(t *testing.T) { + var pc_r = streaming.NewPollingClient(setup.IP, setup.Reverse_subPort) + testDatas := []Tuple{ + {model.DtBool, "rand(true false, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"}, + {model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"}, + {model.DtShort, "rand(32767h, 2)"}, 
{model.DtShort, "array(SHORT, 2,2,NULL)"}, + {model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 2,2,NULL)"}, + {model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"}, + {model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"}, + {model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, "array(MONTH, 2,2,NULL)"}, + {model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"}, + {model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"}, + {model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"}, + {model.DtDatetime, "rand(2019.01.01 12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"}, + {model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"}, + {model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"}, + {model.DtNanoTimestamp, "rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"}, + {model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"}, + {model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, "array(FLOAT, 2,2,NULL)"}, + {model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"}, + {model.DtIP, "take(ipaddr('192.168.1.1'), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"}, + {model.DtUUID, "take(uuid('12345678-1234-1234-1234-123456789012'), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"}, + {model.DtInt128, "take(int128(`e1671797c52e15f763380b45e841ec32), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"}, + {model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, {model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtString, "rand(`AAPL`MSFT`OPPO, 2)"}, {model.DtString, "array(STRING, 2,2,NULL)"}, + {model.DtSymbol, "take(`AAPL`MSFT, 2)"}, {model.DtSymbol, "array(SYMBOL, 2,2,NULL)"}, + {model.DtBlob, "take(blob(`A`B`C), 2)"}, {model.DtBlob, "array(BLOB, 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 2), 2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"}, + } + for _, data := range testDatas { + Convey("TestNewPollingClient_subscribe_oneHandler_arrayVector", t, func() { + _, err := pcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_allTypes(pcConn_r, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: pcConn_r, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host4, + TableName: st, + ActionName: "test_allTypes", + Offset: 0, + Handler: &MessageHandler_allTypes{appender}, + Reconnect: true, + } + + q, err := pc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + msgs := q.Poll(1000, 1000) + for _, msg := range msgs { + var colV = make([]*model.Vector, 2) + var colNamesV = make([]string, 2) + for i := 0; i < 2; i++ { + val := msg.GetValue(i).(*model.Scalar) + // fmt.Println(valV) + dtlist := model.NewDataTypeList(val.GetDataType(), 
[]model.DataType{val.DataType}) + colV[i] = model.NewVector(dtlist) + colNamesV[i] = "col" + strconv.Itoa(i) + } + tmp := model.NewTable(colNamesV, colV) + // fmt.Println(tmp) + _, err := appender.Append(tmp) + AssertNil(err) + } + + err = pc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = pcConn_r.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = pcConn_r.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + Convey("TestNewPollingClient_subscribe_batchHandler_allTypes", t, func() { + _, err := pcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_allTypes(pcConn_r, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: pcConn_r, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host4, + TableName: st, + ActionName: "test_allTypes", + Offset: 0, + BatchHandler: &MessageBatchHandler_allTypes{appender}, + Reconnect: true, + } + req1.SetBatchSize(100) + q, err := pc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + + msgs := q.Poll(1000, 1000) + for _, msg := range msgs { + var colV = make([]*model.Vector, 2) + var colNamesV = make([]string, 2) + for i := 0; i < 2; i++ { + if i == 0 { + dtlist := model.NewEmptyDataTypeList(model.DtTimestamp, 0) + dtlist.Append(msg.GetValue(i).(*model.Scalar).DataType) + colV[i] = model.NewVector(dtlist) + } else { + val := msg.GetValue(i).(*model.Scalar) + // fmt.Println(valV) + dtlist := model.NewDataTypeList(val.GetDataType(), []model.DataType{val.DataType}) + colV[i] = model.NewVector(dtlist) + } + colNamesV[i] = "col" + strconv.Itoa(i) + } + tmp := model.NewTable(colNamesV, colV) + // fmt.Println(tmp) + _, err := appender.Append(tmp) + AssertNil(err) + } + + err = pc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = pcConn_r.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = pcConn_r.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + + } + pc_r.Close() + assert.True(t, pc_r.IsClosed()) +} + +func TestNewPollingClient_subscribe_arrayVector_r(t *testing.T) { + var pc_r = streaming.NewPollingClient(setup.IP, setup.Reverse_subPort) + testDatas := []Tuple{ + {model.DtBool, "rand(true false, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"}, + {model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"}, + {model.DtShort, "rand(32767h, 2)"}, {model.DtShort, "array(SHORT, 2,2,NULL)"}, + {model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 2,2,NULL)"}, + {model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"}, + {model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"}, + {model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, "array(MONTH, 2,2,NULL)"}, + {model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"}, 
+ {model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"}, + {model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"}, + {model.DtDatetime, "rand(2019.01.01 12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"}, + {model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"}, + {model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"}, + {model.DtNanoTimestamp, "rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"}, + {model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"}, + {model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, "array(FLOAT, 2,2,NULL)"}, + {model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"}, + {model.DtIP, "take(ipaddr('192.168.1.1'), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"}, + {model.DtUUID, "take(uuid('12345678-1234-1234-1234-123456789012'), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"}, + {model.DtInt128, "take(int128(`e1671797c52e15f763380b45e841ec32), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"}, + {model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, {model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 2), 2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"}, + } + for _, data := range testDatas { + Convey("TestNewPollingClient_subscribe_oneHandler_arrayVector", t, func() { + _, err := pcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_av(pcConn_r, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: pcConn_r, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host4, + TableName: st, + ActionName: "test_av", + Offset: 0, + Handler: &MessageHandler_av{appender}, + Reconnect: true, + } + + q, err := pc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + msgs := q.Poll(1000, 1000) + for _, msg := range msgs { + var colV = make([]*model.Vector, 2) + var colNamesV = make([]string, 2) + for i := 0; i < 2; i++ { + if i == 0 { + dtlist := model.NewEmptyDataTypeList(model.DtTimestamp, 0) + dtlist.Append(msg.GetValue(i).(*model.Scalar).DataType) + colV[i] = model.NewVector(dtlist) + } else { + valV := msg.GetValue(i).(*model.Scalar).Value().(*model.Vector) + // fmt.Println(valV) + av := model.NewArrayVector([]*model.Vector{valV}) + colV[i] = model.NewVectorWithArrayVector(av) + } + colNamesV[i] = "col" + strconv.Itoa(i) + } + tmp := model.NewTable(colNamesV, colV) + // fmt.Println(tmp) + _, err := appender.Append(tmp) + AssertNil(err) + } + + err = pc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = pcConn_r.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = pcConn_r.RunScript( + "try{ dropStreamTable(`" + st + 
");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + Convey("TestNewPollingClient_subscribe_batchHandler_arrayVector", t, func() { + _, err := pcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_av(pcConn_r, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: pcConn_r, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host4, + TableName: st, + ActionName: "testStreamDeserializer", + Offset: 0, + BatchHandler: &MessageBatchHandler_av{appender}, + Reconnect: true, + } + req1.SetBatchSize(100) + q, err := pc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + + msgs := q.Poll(1000, 1000) + for _, msg := range msgs { + var colV = make([]*model.Vector, 2) + var colNamesV = make([]string, 2) + for i := 0; i < 2; i++ { + if i == 0 { + dtlist := model.NewEmptyDataTypeList(model.DtTimestamp, 0) + dtlist.Append(msg.GetValue(i).(*model.Scalar).DataType) + colV[i] = model.NewVector(dtlist) + } else { + valV := msg.GetValue(i).(*model.Scalar).Value().(*model.Vector) + // fmt.Println(valV) + av := model.NewArrayVector([]*model.Vector{valV}) + colV[i] = model.NewVectorWithArrayVector(av) + } + colNamesV[i] = "col" + strconv.Itoa(i) + } + tmp := model.NewTable(colNamesV, colV) + // fmt.Println(tmp) + _, err := appender.Append(tmp) + AssertNil(err) + } + + err = pc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = pcConn_r.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = pcConn_r.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + + } + pc_r.Close() + assert.True(t, pc_r.IsClosed()) +} + +func TestNewPollingClient_subscribe_with_StreamDeserializer_arrayVector_r(t *testing.T) { + var pc_r = streaming.NewPollingClient(setup.IP, setup.Reverse_subPort) + testDatas := []Tuple{ + {model.DtBool, "rand(0 1, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"}, + {model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"}, + {model.DtShort, "rand(32767h, 2)"}, {model.DtShort, "array(SHORT, 2,2,NULL)"}, + {model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 2,2,NULL)"}, + {model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"}, + {model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"}, + {model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, "array(MONTH, 2,2,NULL)"}, + {model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"}, + {model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"}, + {model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"}, + {model.DtDatetime, "rand(2019.01.01 12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"}, + {model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"}, + {model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"}, + {model.DtNanoTimestamp, 
"rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"}, + {model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"}, + {model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, "array(FLOAT, 2,2,NULL)"}, + {model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"}, + {model.DtIP, "take(ipaddr('192.168.1.1'), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"}, + {model.DtUUID, "take(uuid('12345678-1234-1234-1234-123456789012'), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"}, + {model.DtInt128, "take(int128(`e1671797c52e15f763380b45e841ec32), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"}, + {model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, {model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 2), 2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"}, + } + for _, data := range testDatas { + Convey("TestNewPollingClient_subscribe_oneHandler_with_StreamDeserializer_arrayVector", t, func() { + tbname := "outTables_" + getRandomStr(8) + _, err := pcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + sdhandler, _ := createStreamDeserializer_av(pcConn_r, tbname, data.Dt, data.VecVal) + req1 := &streaming.SubscribeRequest{ + Address: host4, + TableName: tbname, + ActionName: "testStreamDeserializer", + Offset: 0, + Handler: &sdhandler, + Reconnect: true, + } + + targetows := 2000 + q, err := pc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + + msgs := q.Poll(1000, targetows) + for _, msg := range msgs { + + ret, err := sdhandler.sd.Parse(msg) + AssertNil(err) + sym := ret.GetSym() + if sym == "msg1" { + sdhandler.msg1_total += 1 + for i := 0; i < len(sdhandler.coltype1); i++ { + if i != 3 { + val := ret.GetValue(i).(*model.Scalar).DataType + sdhandler.res1_data[i].Append(val) + } else { + val := ret.GetValue(i).(*model.Vector) + sdhandler.res1_data[i].AppendVectorValue(val.GetVectorValue(0)) + } + } + // fmt.Println(s.res1_data) + + } else if sym == "msg2" { + sdhandler.msg2_total += 1 + for i := 0; i < len(sdhandler.coltype2); i++ { + if i != 3 { + val := ret.GetValue(i).(*model.Scalar).DataType + sdhandler.res2_data[i].Append(val) + } else { + val := ret.GetValue(i).(*model.Vector) + sdhandler.res2_data[i].AppendVectorValue(val.GetVectorValue(0)) + } + } + } + } + + err = pc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + res_tab1 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1", "price2"}, sdhandler.res1_data) + res_tab2 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1"}, sdhandler.res2_data) + + // fmt.Println("res_tab1: ", res_tab1) + // fmt.Println("res_tab2: ", res_tab2) + // So(res_tab1.get, ShouldEqual, model.DtAny) + So(sdhandler.msg1_total+sdhandler.msg2_total, ShouldEqual, targetows) + _, err = pcConn_r.Upload(map[string]model.DataForm{"res1": res_tab1, "res2": res_tab2}) + AssertNil(err) + _, err = pcConn_r.RunScript("res = select * from res1 order by 
datetimev,timestampv;ex= select * from table1 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = pcConn_r.RunScript("res = select * from res2 order by datetimev,timestampv;ex= select * from table2 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + _, err = pcConn_r.RunScript( + "try{ dropStreamTable(`" + tbname + ");}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + }) + + Convey("TestNewPollingClient_subscribe_batchHandler_with_StreamDeserializer_arrayVector", t, func() { + tbname := "outTables_" + getRandomStr(8) + _, err := pcConn_r.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + _, sdBatchHandler := createStreamDeserializer_av(pcConn_r, tbname, data.Dt, data.VecVal) + req1 := &streaming.SubscribeRequest{ + Address: host4, + TableName: tbname, + ActionName: "testStreamDeserializer", + Offset: 0, + BatchHandler: &sdBatchHandler, + Reconnect: true, + } + + req1.SetBatchSize(200) + targetows := 2000 + q, err := pc_r.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + + msgs := q.Poll(1000, targetows) + for _, msg := range msgs { + + ret, err := sdBatchHandler.sd.Parse(msg) + AssertNil(err) + sym := ret.GetSym() + if sym == "msg1" { + sdBatchHandler.msg1_total += 1 + for i := 0; i < len(sdBatchHandler.coltype1); i++ { + if i != 3 { + val := ret.GetValue(i).(*model.Scalar).DataType + sdBatchHandler.res1_data[i].Append(val) + } else { + val := ret.GetValue(i).(*model.Vector) + sdBatchHandler.res1_data[i].AppendVectorValue(val.GetVectorValue(0)) + } + } + // fmt.Println(s.res1_data) + + } else if sym == "msg2" { + sdBatchHandler.msg2_total += 1 + for i := 0; i < len(sdBatchHandler.coltype2); i++ { + if i != 3 { + val := ret.GetValue(i).(*model.Scalar).DataType + sdBatchHandler.res2_data[i].Append(val) + } else { + val := ret.GetValue(i).(*model.Vector) + sdBatchHandler.res2_data[i].AppendVectorValue(val.GetVectorValue(0)) + } + } + } + } + + err = pc_r.UnSubscribe(req1) + So(err, ShouldBeNil) + + res_tab1 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1", "price2"}, sdBatchHandler.res1_data) + res_tab2 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1"}, sdBatchHandler.res2_data) + + // fmt.Println("res_tab1: ", res_tab1) + // fmt.Println("res_tab2: ", res_tab2) + // So(res_tab1.get, ShouldEqual, model.DtAny) + So(sdBatchHandler.msg1_total+sdBatchHandler.msg2_total, ShouldEqual, targetows) + _, err = pcConn_r.Upload(map[string]model.DataForm{"res1": res_tab1, "res2": res_tab2}) + AssertNil(err) + _, err = pcConn_r.RunScript("res = select * from res1 order by datetimev,timestampv;ex= select * from table1 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = pcConn_r.RunScript("res = select * from res2 order by datetimev,timestampv;ex= select * from table2 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + _, err = pcConn_r.RunScript( + "try{ dropStreamTable(`" + tbname + ");}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + }) + } + 
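+	// all per-type subtests above share the one polling client created at the
+	// top of this test; it is closed only after the whole testDatas loop has
+	// finished, so no subscription is torn down while a later case is running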
pc_r.Close()
+	assert.True(t, pc_r.IsClosed())
+}
diff --git a/test/streaming/pollingClient_test.go b/test/streaming/pollingClient_test.go
index 9a37b6d..d83989e 100644
--- a/test/streaming/pollingClient_test.go
+++ b/test/streaming/pollingClient_test.go
@@ -538,7 +538,7 @@ func TestPollingClient_tableName_actionName(t *testing.T) {
 	assert.True(t, pc.IsClosed())
 }
 
 func TestPollingClient_tableName_handler_offset_reconnect_success(t *testing.T) {
 	var pc = streaming.NewPollingClient(setup.IP, setup.SubPort)
 	Convey("TestPollingClient_tableName_handler_offset_reconnect_success", t, func() {
 		st, receive := CreateStreamingTableWithRandomName(pcConn)
@@ -573,7 +573,7 @@ func TestPollingClient_tableName_handler_offset_reconnect_success(t *testing.T)
 		_, err := pcConn.RunScript(script)
 		AssertNil(err)
 	}
-	res, _ := pcConn.RunScript("res = select * from " + receive + " order by tag;ex = select * from " + st + " order by tag;share res as res_t;share ex as ex_t;each(eqObj, ex.values(), res.values())")
+	res, _ := pcConn.RunScript("res = select * from " + receive + " order by tag;ex = select * from " + st + " order by tag;each(eqObj, ex.values(), res.values())")
 	for _, val := range res.(*model.Vector).Data.Value() {
 		So(val, ShouldBeTrue)
 	}
@@ -1009,3 +1009,504 @@ func TestPollingClient_subscribe_with_StreamDeserializer(t *testing.T) {
 	pc.Close()
 	assert.True(t, pc.IsClosed())
 }
+
+func TestNewPollingClient_subscribe_allTypes(t *testing.T) {
+	var pc = streaming.NewPollingClient(setup.IP, setup.Reverse_subPort)
+	testDatas := []Tuple{
+		{model.DtBool, "rand(true false, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"},
+		{model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"},
+		{model.DtShort, "rand(32767h, 2)"}, {model.DtShort, "array(SHORT, 2,2,NULL)"},
+		{model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 2,2,NULL)"},
+		{model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"},
+		{model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"},
+		{model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, "array(MONTH, 2,2,NULL)"},
+		{model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"},
+		{model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"},
+		{model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"},
+		{model.DtDatetime, "rand(2019.01.01 12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"},
+		{model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"},
+		{model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"},
+		{model.DtNanoTimestamp, "rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"},
+		{model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"},
+		{model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, "array(FLOAT, 2,2,NULL)"},
+		{model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"},
+		{model.DtIP, "take(ipaddr('192.168.1.1'), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"},
+		{model.DtUUID, "take(uuid('12345678-1234-1234-1234-123456789012'), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"},
+		{model.DtInt128, "take(int128(`e1671797c52e15f763380b45e841ec32), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"},
+		{model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, 
{model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtString, "rand(`AAPL`MSFT`OPPO, 2)"}, {model.DtString, "array(STRING, 2,2,NULL)"}, + {model.DtSymbol, "take(`AAPL`MSFT, 2)"}, {model.DtSymbol, "array(SYMBOL, 2,2,NULL)"}, + {model.DtBlob, "take(blob(`A`B`C), 2)"}, {model.DtBlob, "array(BLOB, 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 2), 2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"}, + } + for _, data := range testDatas { + Convey("TestNewPollingClient_subscribe_oneHandler_arrayVector", t, func() { + _, err := pcConn.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_allTypes(pcConn, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: pcConn, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host4, + TableName: st, + ActionName: "test_allTypes", + Offset: 0, + Handler: &MessageHandler_allTypes{appender}, + Reconnect: true, + } + + q, err := pc.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + msgs := q.Poll(1000, 1000) + for _, msg := range msgs { + var colV = make([]*model.Vector, 2) + var colNamesV = make([]string, 2) + for i := 0; i < 2; i++ { + val := msg.GetValue(i).(*model.Scalar) + // fmt.Println(valV) + dtlist := model.NewDataTypeList(val.GetDataType(), []model.DataType{val.DataType}) + colV[i] = model.NewVector(dtlist) + colNamesV[i] = "col" + strconv.Itoa(i) + } + tmp := model.NewTable(colNamesV, colV) + // fmt.Println(tmp) + _, err := appender.Append(tmp) + AssertNil(err) + } + + err = pc.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = pcConn.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = pcConn.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + Convey("TestNewPollingClient_subscribe_batchHandler_allTypes", t, func() { + _, err := pcConn.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};") + So(err, ShouldBeNil) + st, re := CreateStreamingTableWithRandomName_allTypes(pcConn, data.Dt, data.VecVal) + appenderOpt := &api.TableAppenderOption{ + TableName: re, + Conn: pcConn, + } + appender := api.NewTableAppender(appenderOpt) + req1 := &streaming.SubscribeRequest{ + Address: host4, + TableName: st, + ActionName: "test_allTypes", + Offset: 0, + BatchHandler: &MessageBatchHandler_allTypes{appender}, + Reconnect: true, + } + req1.SetBatchSize(100) + q, err := pc.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + + msgs := q.Poll(1000, 1000) + for _, msg := range msgs { + var colV = make([]*model.Vector, 2) + var colNamesV = make([]string, 2) + for i := 0; i < 2; i++ { + if i == 0 { + dtlist := model.NewEmptyDataTypeList(model.DtTimestamp, 0) + 
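+						// column 0 carries the replayed TIMESTAMP key; its scalar
+						// DataType is collected into a one-element list so it can
+						// back a plain Vector column for the appender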
dtlist.Append(msg.GetValue(i).(*model.Scalar).DataType) + colV[i] = model.NewVector(dtlist) + } else { + val := msg.GetValue(i).(*model.Scalar) + // fmt.Println(valV) + dtlist := model.NewDataTypeList(val.GetDataType(), []model.DataType{val.DataType}) + colV[i] = model.NewVector(dtlist) + } + colNamesV[i] = "col" + strconv.Itoa(i) + } + tmp := model.NewTable(colNamesV, colV) + // fmt.Println(tmp) + _, err := appender.Append(tmp) + AssertNil(err) + } + + err = pc.UnSubscribe(req1) + So(err, ShouldBeNil) + + _, err = pcConn.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = pcConn.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + + } + pc.Close() + assert.True(t, pc.IsClosed()) +} + +func TestNewPollingClient_subscribe_arrayVector(t *testing.T) { + var pc = streaming.NewPollingClient(setup.IP, setup.SubPort) + testDatas := []Tuple{ + {model.DtBool, "rand(true false, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"}, + {model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"}, + {model.DtShort, "rand(32767h, 2)"}, {model.DtShort, "array(SHORT, 2,2,NULL)"}, + {model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 2,2,NULL)"}, + {model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"}, + {model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"}, + {model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, "array(MONTH, 2,2,NULL)"}, + {model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"}, + {model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"}, + {model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"}, + {model.DtDatetime, "rand(2019.01.01 12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"}, + {model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"}, + {model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"}, + {model.DtNanoTimestamp, "rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"}, + {model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"}, + {model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, "array(FLOAT, 2,2,NULL)"}, + {model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"}, + {model.DtIP, "take(ipaddr('192.168.1.1'), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"}, + {model.DtUUID, "take(uuid('12345678-1234-1234-1234-123456789012'), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"}, + {model.DtInt128, "take(int128(`e1671797c52e15f763380b45e841ec32), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"}, + {model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, {model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 2), 
2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"},
+	}
+	for _, data := range testDatas {
+		Convey("TestNewPollingClient_subscribe_oneHandler_arrayVector", t, func() {
+			_, err := pcConn.RunScript(
+				"try{ dropStreamTable(`st2);}catch(ex){};" +
+					"try{ dropStreamTable(`st1);}catch(ex){};")
+			So(err, ShouldBeNil)
+			st, re := CreateStreamingTableWithRandomName_av(pcConn, data.Dt, data.VecVal)
+			appenderOpt := &api.TableAppenderOption{
+				TableName: re,
+				Conn:      pcConn,
+			}
+			appender := api.NewTableAppender(appenderOpt)
+			req1 := &streaming.SubscribeRequest{
+				Address:    host5,
+				TableName:  st,
+				ActionName: "test_av",
+				Offset:     0,
+				Handler:    &MessageHandler_av{appender},
+				Reconnect:  true,
+			}
+
+			q, err := pc.Subscribe(req1)
+			So(err, ShouldBeNil)
+			fmt.Println("started subscribe...")
+			msgs := q.Poll(1000, 1000)
+			for _, msg := range msgs {
+				var colV = make([]*model.Vector, 2)
+				var colNamesV = make([]string, 2)
+				for i := 0; i < 2; i++ {
+					if i == 0 {
+						dtlist := model.NewEmptyDataTypeList(model.DtTimestamp, 0)
+						dtlist.Append(msg.GetValue(i).(*model.Scalar).DataType)
+						colV[i] = model.NewVector(dtlist)
+					} else {
+						valV := msg.GetValue(i).(*model.Scalar).Value().(*model.Vector)
+						// fmt.Println(valV)
+						av := model.NewArrayVector([]*model.Vector{valV})
+						colV[i] = model.NewVectorWithArrayVector(av)
+					}
+					colNamesV[i] = "col" + strconv.Itoa(i)
+				}
+				tmp := model.NewTable(colNamesV, colV)
+				// fmt.Println(tmp)
+				_, err := appender.Append(tmp)
+				AssertNil(err)
+			}
+
+			err = pc.UnSubscribe(req1)
+			So(err, ShouldBeNil)
+
+			_, err = pcConn.RunScript("res = select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())")
+			AssertNil(err)
+
+			_, err = pcConn.RunScript(
+				"try{ dropStreamTable(`" + st + ");}catch(ex){};" +
+					"try{ dropStreamTable(`" + re + ");}catch(ex){};" +
+					"try{ dropStreamTable(`st1);}catch(ex){};" +
+					"try{ dropStreamTable(`st2);}catch(ex){};go")
+			So(err, ShouldBeNil)
+		})
+		Convey("TestNewPollingClient_subscribe_batchHandler_arrayVector", t, func() {
+			_, err := pcConn.RunScript(
+				"try{ dropStreamTable(`st2);}catch(ex){};" +
+					"try{ dropStreamTable(`st1);}catch(ex){};")
+			So(err, ShouldBeNil)
+			st, re := CreateStreamingTableWithRandomName_av(pcConn, data.Dt, data.VecVal)
+			appenderOpt := &api.TableAppenderOption{
+				TableName: re,
+				Conn:      pcConn,
+			}
+			appender := api.NewTableAppender(appenderOpt)
+			req1 := &streaming.SubscribeRequest{
+				Address:      host5,
+				TableName:    st,
+				ActionName:   "testStreamDeserializer",
+				Offset:       0,
+				BatchHandler: &MessageBatchHandler_av{appender},
+				Reconnect:    true,
+			}
+			req1.SetBatchSize(100)
+			q, err := pc.Subscribe(req1)
+			So(err, ShouldBeNil)
+			fmt.Println("started subscribe...")
+
+			msgs := q.Poll(1000, 1000)
+			for _, msg := range msgs {
+				var colV = make([]*model.Vector, 2)
+				var colNamesV = make([]string, 2)
+				for i := 0; i < 2; i++ {
+					if i == 0 {
+						dtlist := model.NewEmptyDataTypeList(model.DtTimestamp, 0)
+						dtlist.Append(msg.GetValue(i).(*model.Scalar).DataType)
+						colV[i] = model.NewVector(dtlist)
+					} else {
+						valV := msg.GetValue(i).(*model.Scalar).Value().(*model.Vector)
+						// fmt.Println(valV)
+						av := model.NewArrayVector([]*model.Vector{valV})
+						colV[i] = model.NewVectorWithArrayVector(av)
+					}
+					colNamesV[i] = "col" + strconv.Itoa(i)
+				}
+				tmp := model.NewTable(colNamesV, colV)
+				// fmt.Println(tmp)
+				_, err := appender.Append(tmp)
+				AssertNil(err)
+			}
+
+			err = pc.UnSubscribe(req1)
+			So(err, ShouldBeNil)
+
+			_, err = pcConn.RunScript("res = 
select * from " + re + " order by ts;ex= select * from " + st + " order by ts;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = pcConn.RunScript( + "try{ dropStreamTable(`" + st + ");}catch(ex){};" + + "try{ dropStreamTable(`" + re + ");}catch(ex){};" + + "try{ dropStreamTable(`st1);}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};go") + So(err, ShouldBeNil) + }) + + } + pc.Close() + assert.True(t, pc.IsClosed()) +} + +func TestNewPollingClient_subscribe_with_StreamDeserializer_arrayVector(t *testing.T) { + var pc = streaming.NewPollingClient(setup.IP, setup.SubPort) + testDatas := []Tuple{ + {model.DtBool, "rand(0 1, 2)"}, {model.DtBool, "array(BOOL, 2,2,NULL)"}, + {model.DtChar, "rand(127c, 2)"}, {model.DtChar, "array(CHAR, 2,2,NULL)"}, + {model.DtShort, "rand(32767h, 2)"}, {model.DtShort, "array(SHORT, 2,2,NULL)"}, + {model.DtInt, "rand(2147483647, 2)"}, {model.DtInt, "array(INT, 2,2,NULL)"}, + {model.DtLong, "rand(1000l, 2)"}, {model.DtLong, "array(LONG, 2,2,NULL)"}, + {model.DtDate, "rand(2019.01.01, 2)"}, {model.DtDate, "array(DATE, 2,2,NULL)"}, + {model.DtMonth, "rand(2019.01M, 2)"}, {model.DtMonth, "array(MONTH, 2,2,NULL)"}, + {model.DtTime, "rand(12:00:00.123, 2)"}, {model.DtTime, "array(TIME, 2,2,NULL)"}, + {model.DtMinute, "rand(12:00m, 2)"}, {model.DtMinute, "array(MINUTE, 2,2,NULL)"}, + {model.DtSecond, "rand(12:00:00, 2)"}, {model.DtSecond, "array(SECOND, 2,2,NULL)"}, + {model.DtDatetime, "rand(2019.01.01 12:00:00, 2)"}, {model.DtDatetime, "array(DATETIME, 2,2,NULL)"}, + {model.DtTimestamp, "rand(2019.01.01 12:00:00.123, 2)"}, {model.DtTimestamp, "array(TIMESTAMP, 2,2,NULL)"}, + {model.DtNanoTime, "rand(12:00:00.123456789, 2)"}, {model.DtNanoTime, "array(NANOTIME, 2,2,NULL)"}, + {model.DtNanoTimestamp, "rand(2019.01.01 12:00:00.123456789, 2)"}, {model.DtNanoTimestamp, "array(NANOTIMESTAMP, 2,2,NULL)"}, + {model.DtDateHour, "rand(datehour(100), 2)"}, {model.DtDateHour, "array(DATEHOUR, 2,2,NULL)"}, + {model.DtFloat, "rand(10.00f, 2)"}, {model.DtFloat, "array(FLOAT, 2,2,NULL)"}, + {model.DtDouble, "rand(10.00, 2)"}, {model.DtDouble, "array(DOUBLE, 2,2,NULL)"}, + {model.DtIP, "take(ipaddr('192.168.1.1'), 2)"}, {model.DtIP, "array(IPADDR, 2,2,NULL)"}, + {model.DtUUID, "take(uuid('12345678-1234-1234-1234-123456789012'), 2)"}, {model.DtUUID, "array(UUID, 2,2,NULL)"}, + {model.DtInt128, "take(int128(`e1671797c52e15f763380b45e841ec32), 2)"}, {model.DtInt128, "array(INT128, 2,2,NULL)"}, + {model.DtDecimal32, "decimal32(rand('-1.123''''2.23468965412', 2), 8)"}, {model.DtDecimal32, "array(DECIMAL32(2), 2,2,NULL)"}, + {model.DtDecimal64, "decimal64(rand('-1.123''''2.123123123123123123', 2), 15)"}, {model.DtDecimal64, "array(DECIMAL64(15), 2,2,NULL)"}, + {model.DtDecimal128, "decimal128(rand('-1.123''''2.123123123123123123123123123', 2), 25)"}, {model.DtDecimal128, "array(DECIMAL128(25), 2,2,NULL)"}, + {model.DtComplex, "take(complex(1,2), 2)"}, {model.DtComplex, "array(COMPLEX, 2,2,NULL)"}, + {model.DtPoint, "take(point(1, 2), 2)"}, {model.DtPoint, "array(POINT, 2,2,NULL)"}, + } + for _, data := range testDatas { + Convey("TestNewPollingClient_subscribe_oneHandler_with_StreamDeserializer_arrayVector", t, func() { + tbname := "outTables_" + getRandomStr(8) + _, err := pcConn.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + sdhandler, _ := createStreamDeserializer_av(pcConn, tbname, data.Dt, data.VecVal) + req1 := 
&streaming.SubscribeRequest{ + Address: host5, + TableName: tbname, + ActionName: "testStreamDeserializer", + Offset: 0, + Handler: &sdhandler, + Reconnect: true, + } + + targetows := 2000 + q, err := pc.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + + msgs := q.Poll(1000, targetows) + for _, msg := range msgs { + + ret, err := sdhandler.sd.Parse(msg) + AssertNil(err) + sym := ret.GetSym() + if sym == "msg1" { + sdhandler.msg1_total += 1 + for i := 0; i < len(sdhandler.coltype1); i++ { + if i != 3 { + val := ret.GetValue(i).(*model.Scalar).DataType + sdhandler.res1_data[i].Append(val) + } else { + val := ret.GetValue(i).(*model.Vector) + sdhandler.res1_data[i].AppendVectorValue(val.GetVectorValue(0)) + } + } + // fmt.Println(s.res1_data) + + } else if sym == "msg2" { + sdhandler.msg2_total += 1 + for i := 0; i < len(sdhandler.coltype2); i++ { + if i != 3 { + val := ret.GetValue(i).(*model.Scalar).DataType + sdhandler.res2_data[i].Append(val) + } else { + val := ret.GetValue(i).(*model.Vector) + sdhandler.res2_data[i].AppendVectorValue(val.GetVectorValue(0)) + } + } + } + } + + err = pc.UnSubscribe(req1) + So(err, ShouldBeNil) + + res_tab1 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1", "price2"}, sdhandler.res1_data) + res_tab2 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1"}, sdhandler.res2_data) + + // fmt.Println("res_tab1: ", res_tab1) + // fmt.Println("res_tab2: ", res_tab2) + // So(res_tab1.get, ShouldEqual, model.DtAny) + So(sdhandler.msg1_total+sdhandler.msg2_total, ShouldEqual, targetows) + _, err = pcConn.Upload(map[string]model.DataForm{"res1": res_tab1, "res2": res_tab2}) + AssertNil(err) + _, err = pcConn.RunScript("res = select * from res1 order by datetimev,timestampv;ex= select * from table1 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = pcConn.RunScript("res = select * from res2 order by datetimev,timestampv;ex= select * from table2 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + _, err = pcConn.RunScript( + "try{ dropStreamTable(`" + tbname + ");}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + }) + Convey("TestNewPollingClient_subscribe_batchHandler_with_StreamDeserializer_arrayVector", t, func() { + tbname := "outTables_" + getRandomStr(8) + _, err := pcConn.RunScript( + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + _, sdBatchHandler := createStreamDeserializer_av(pcConn, tbname, data.Dt, data.VecVal) + req1 := &streaming.SubscribeRequest{ + Address: host5, + TableName: tbname, + ActionName: "testStreamDeserializer", + Offset: 0, + BatchHandler: &sdBatchHandler, + Reconnect: true, + } + + req1.SetBatchSize(200) + targetows := 2000 + q, err := pc.Subscribe(req1) + So(err, ShouldBeNil) + fmt.Println("started subscribe...") + + msgs := q.Poll(1000, targetows) + for _, msg := range msgs { + + ret, err := sdBatchHandler.sd.Parse(msg) + AssertNil(err) + sym := ret.GetSym() + if sym == "msg1" { + sdBatchHandler.msg1_total += 1 + for i := 0; i < len(sdBatchHandler.coltype1); i++ { + if i != 3 { + val := ret.GetValue(i).(*model.Scalar).DataType + sdBatchHandler.res1_data[i].Append(val) + } else { + val := ret.GetValue(i).(*model.Vector) + 
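+							// column 3 is the array-vector column: the parsed row holds a
+							// nested vector, so its single row is appended with
+							// AppendVectorValue rather than Append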
sdBatchHandler.res1_data[i].AppendVectorValue(val.GetVectorValue(0)) + } + } + // fmt.Println(s.res1_data) + + } else if sym == "msg2" { + sdBatchHandler.msg2_total += 1 + for i := 0; i < len(sdBatchHandler.coltype2); i++ { + if i != 3 { + val := ret.GetValue(i).(*model.Scalar).DataType + sdBatchHandler.res2_data[i].Append(val) + } else { + val := ret.GetValue(i).(*model.Vector) + sdBatchHandler.res2_data[i].AppendVectorValue(val.GetVectorValue(0)) + } + } + } + } + + err = pc.UnSubscribe(req1) + So(err, ShouldBeNil) + + res_tab1 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1", "price2"}, sdBatchHandler.res1_data) + res_tab2 := model.NewTable([]string{"datetimev", "timestampv", "sym", "price1"}, sdBatchHandler.res2_data) + + // fmt.Println("res_tab1: ", res_tab1) + // fmt.Println("res_tab2: ", res_tab2) + // So(res_tab1.get, ShouldEqual, model.DtAny) + So(sdBatchHandler.msg1_total+sdBatchHandler.msg2_total, ShouldEqual, targetows) + _, err = pcConn.Upload(map[string]model.DataForm{"res1": res_tab1, "res2": res_tab2}) + AssertNil(err) + _, err = pcConn.RunScript("res = select * from res1 order by datetimev,timestampv;ex= select * from table1 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + + _, err = pcConn.RunScript("res = select * from res2 order by datetimev,timestampv;ex= select * from table2 order by datetimev,timestampv;assert each(eqObj, res.values(), ex.values())") + AssertNil(err) + _, err = pcConn.RunScript( + "try{ dropStreamTable(`" + tbname + ");}catch(ex){};" + + "try{ dropStreamTable(`st2);}catch(ex){};" + + "try{ undef(`table1, SHARED);}catch(ex){};" + + "try{ undef(`table2, SHARED);}catch(ex){};go") + So(err, ShouldBeNil) + }) + } + pc.Close() + assert.True(t, pc.IsClosed()) +} diff --git a/test/streaming/util.go b/test/streaming/util.go index 0440571..83bcc77 100644 --- a/test/streaming/util.go +++ b/test/streaming/util.go @@ -6,6 +6,7 @@ import ( "fmt" "math/big" "reflect" + "strconv" "strings" "sync" "time" @@ -27,6 +28,11 @@ var ( MemTableName = "memTable" ) +type Tuple struct { + Dt model.DataTypeByte + VecVal string +} + func AssertNil(err error) { if err != nil { panic(fmt.Sprintf("err is not nil: %s", err.Error())) @@ -751,6 +757,69 @@ func CreateStreamingTableWithRandomName(conn api.DolphinDB) (string, string) { return st, re } +func CreateStreamingTableWithRandomName_allTypes(conn api.DolphinDB, dataType model.DataTypeByte, vecVal string) (string, string) { + suffix := getRandomStr(5) + typeString := strings.ToUpper(model.GetDataTypeString(dataType)) + if strings.Contains(typeString, "DECIMAL32") { + typeString = "DECIMAL32(5)" + } else if strings.Contains(typeString, "DECIMAL64") { + typeString = "DECIMAL64(15)" + } else if strings.Contains(typeString, "DECIMAL128") { + typeString = "DECIMAL128(33)" + } + fmt.Println(`test type: `, typeString) + + _, err := conn.RunScript("login(`admin,`123456);" + + "try{dropStreamTable('st1')}catch(ex){};" + + "try{dropStreamTable('st2')}catch(ex){};") + AssertNil(err) + st := "arrayVectorTable_" + suffix + re := "Receive_arrayVectorTable_" + suffix + _, err = conn.RunScript( + "colName = `ts`c1;" + + "colType = [TIMESTAMP, " + typeString + "];" + + "st1 = streamTable(1:0,colName, colType);st2 = streamTable(1:0,colName, colType);" + + "enableTableShareAndPersistence(table=st1, tableName=`" + st + ");share(st2, `" + re + ");go;" + + "n = 1000;" + + "tmp = table(1:0,colName, colType);" + + "for(i in 0:n){tableInsert(tmp, timestamp(1+i), array(" + typeString + 
").append!(" + vecVal + "[0]));};" + + "replay(inputTables=tmp, outputTables=`" + st + ", dateColumn=`ts, timeColumn=`ts);") + AssertNil(err) + return st, re +} + +func CreateStreamingTableWithRandomName_av(conn api.DolphinDB, dataType model.DataTypeByte, vecVal string) (string, string) { + suffix := getRandomStr(5) + typeString := strings.ToUpper(model.GetDataTypeString(dataType)) + if strings.Contains(typeString, "DECIMAL32") { + typeString = "DECIMAL32(5)" + } else if strings.Contains(typeString, "DECIMAL64") { + typeString = "DECIMAL64(15)" + } else if strings.Contains(typeString, "DECIMAL128") { + typeString = "DECIMAL128(33)" + } + typeString = typeString + "[]" + fmt.Println(`test type: `, typeString) + + _, err := conn.RunScript("login(`admin,`123456);" + + "try{dropStreamTable('st1')}catch(ex){};" + + "try{dropStreamTable('st2')}catch(ex){};") + AssertNil(err) + st := "arrayVectorTable_" + suffix + re := "Receive_arrayVectorTable_" + suffix + _, err = conn.RunScript( + "colName = `ts`c1;" + + "colType = [TIMESTAMP, " + typeString + "];" + + "st1 = streamTable(1:0,colName, colType);st2 = streamTable(1:0,colName, colType);" + + "enableTableShareAndPersistence(table=st1, tableName=`" + st + ");share(st2, `" + re + ");go;" + + "n = 1000;" + + "tmp = table(1:0,colName, colType);" + + "for(i in 0:n){tableInsert(tmp, timestamp(1+i), array(" + typeString + ").append!([" + vecVal + "]));};" + + "replay(inputTables=tmp, outputTables=`" + st + ", dateColumn=`ts, timeColumn=`ts);") + AssertNil(err) + return st, re +} + var wg sync.WaitGroup func threadWriteData(conn api.DolphinDB, tabName string, batch int) { @@ -794,6 +863,23 @@ type MessageHandler_table struct { conn api.DolphinDB } +type MessageBatchHandler_av struct { + appender *api.TableAppender +} + +// test with msgAsTable=true +type MessageHandler_av struct { + appender *api.TableAppender +} + +type MessageHandler_allTypes struct { + appender *api.TableAppender +} + +type MessageBatchHandler_allTypes struct { + appender *api.TableAppender +} + type MessageHandler_unsubscribeInDoEvent struct { subType string subClient interface{} @@ -895,6 +981,80 @@ func (s *MessageHandler_unsubscribeInDoEvent) DoEvent(msg streaming.IMessage) { } } +func (s *MessageBatchHandler_av) DoEvent(msgv []streaming.IMessage) { + for _, msg := range msgv { + var colV = make([]*model.Vector, 2) + var colNamesV = make([]string, 2) + for i := 0; i < 2; i++ { + if i == 0 { + dtlist := model.NewEmptyDataTypeList(model.DtTimestamp, 0) + dtlist.Append(msg.GetValue(i).(*model.Scalar).DataType) + colV[i] = model.NewVector(dtlist) + } else { + valV := msg.GetValue(i).(*model.Scalar).Value().(*model.Vector) + // fmt.Println(valV) + av := model.NewArrayVector([]*model.Vector{valV}) + colV[i] = model.NewVectorWithArrayVector(av) + } + colNamesV[i] = "col" + strconv.Itoa(i) + } + tmp := model.NewTable(colNamesV, colV) + // fmt.Println(tmp) + _, err := s.appender.Append(tmp) + AssertNil(err) + } +} + +func (s *MessageHandler_av) DoEvent(msg streaming.IMessage) { + var colV = make([]*model.Vector, 2) + var colNamesV = make([]string, 2) + for i := 0; i < 2; i++ { + colV[i] = msg.GetValue(i).(*model.Vector) + colNamesV[i] = "col" + strconv.Itoa(i) + } + tmp := model.NewTable(colNamesV, colV) + // fmt.Println(tmp) + _, err := s.appender.Append(tmp) + AssertNil(err) +} + +func (s *MessageBatchHandler_allTypes) DoEvent(msgv []streaming.IMessage) { + for _, msg := range msgv { + var colV = make([]*model.Vector, 2) + var colNamesV = make([]string, 2) + for i := 0; i < 2; i++ { + if 
i == 0 { + dtlist := model.NewEmptyDataTypeList(model.DtTimestamp, 0) + dtlist.Append(msg.GetValue(i).(*model.Scalar).DataType) + colV[i] = model.NewVector(dtlist) + } else { + val := msg.GetValue(i).(*model.Scalar) + // fmt.Println(valV) + dtlist := model.NewDataTypeList(val.GetDataType(), []model.DataType{val.DataType}) + colV[i] = model.NewVector(dtlist) + } + colNamesV[i] = "col" + strconv.Itoa(i) + } + tmp := model.NewTable(colNamesV, colV) + // fmt.Println(tmp) + _, err := s.appender.Append(tmp) + AssertNil(err) + } +} + +func (s *MessageHandler_allTypes) DoEvent(msg streaming.IMessage) { + var colV = make([]*model.Vector, 2) + var colNamesV = make([]string, 2) + for i := 0; i < 2; i++ { + colV[i] = msg.GetValue(i).(*model.Vector) + colNamesV[i] = "col" + strconv.Itoa(i) + } + tmp := model.NewTable(colNamesV, colV) + // fmt.Println(tmp) + _, err := s.appender.Append(tmp) + AssertNil(err) +} + func (s *sdHandler) DoEvent(msg streaming.IMessage) { ret, err := s.sd.Parse(msg) AssertNil(err) @@ -929,12 +1089,12 @@ func (s *sdHandler) DoEvent(msg streaming.IMessage) { } func (s *sdBatchHandler) DoEvent(msgs []streaming.IMessage) { + s.lock.Lock() for _, msg := range msgs { ret, err := s.sd.Parse(msg) AssertNil(err) sym := ret.GetSym() - s.lock.Lock() if sym == "msg1" { s.msg1_total += 1 AssertEqual(ret.Size(), 5) @@ -960,9 +1120,8 @@ func (s *sdBatchHandler) DoEvent(msgs []streaming.IMessage) { } } - s.lock.Unlock() } - + s.lock.Unlock() } func (s *sdHandler_av) DoEvent(msg streaming.IMessage) { @@ -973,33 +1132,27 @@ func (s *sdHandler_av) DoEvent(msg streaming.IMessage) { s.lock.Lock() if sym == "msg1" { s.msg1_total += 1 - AssertEqual(ret.Size(), 5) for i := 0; i < len(s.coltype1); i++ { - AssertEqual(ret.GetValue(i).GetDataType(), s.coltype1[i]) - fmt.Println(ret.GetValue(i).GetDataFormString()) if i != 3 { - val := ret.GetValue(i).(*model.Scalar).Value() - dt, err := model.NewDataType(s.coltype1[i], val) - AssertNil(err) - AssertNil(s.res1_data[i].Append(dt)) + val := ret.GetValue(i).(*model.Scalar).DataType + s.res1_data[i].Append(val) } else { val := ret.GetValue(i).(*model.Vector) - dt, err := model.NewDataType(s.coltype1[i], val) - AssertNil(err) - AssertNil(s.res1_data[i].Append(dt)) + s.res1_data[i].AppendVectorValue(val.GetVectorValue(0)) } } + // fmt.Println(s.res1_data) } else if sym == "msg2" { s.msg2_total += 1 - AssertEqual(ret.Size(), 4) for i := 0; i < len(s.coltype2); i++ { - AssertEqual(ret.GetValue(i).GetDataType(), s.coltype2[i]) - fmt.Println(ret.GetValue(i).GetDataFormString()) - val := ret.GetValue(i).(*model.Scalar).Value() - dt, err := model.NewDataType(s.coltype2[i], val) - AssertNil(err) - AssertNil(s.res2_data[i].Append(dt)) + if i != 3 { + val := ret.GetValue(i).(*model.Scalar).DataType + s.res2_data[i].Append(val) + } else { + val := ret.GetValue(i).(*model.Vector) + s.res2_data[i].AppendVectorValue(val.GetVectorValue(0)) + } } } s.lock.Unlock() @@ -1014,32 +1167,32 @@ func (s *sdBatchHandler_av) DoEvent(msgs []streaming.IMessage) { s.lock.Lock() if sym == "msg1" { s.msg1_total += 1 - AssertEqual(ret.Size(), 5) for i := 0; i < len(s.coltype1); i++ { - AssertEqual(ret.GetValue(i).GetDataType(), s.coltype1[i]) - // fmt.Println(ret.GetValue(i).(*model.Scalar).Value()) - val := ret.GetValue(i).(*model.Scalar).Value() - dt, err := model.NewDataType(s.coltype1[i], val) - AssertNil(err) - AssertNil(s.res1_data[i].Append(dt)) + if i != 3 { + val := ret.GetValue(i).(*model.Scalar).DataType + s.res1_data[i].Append(val) + } else { + val := 
ret.GetValue(i).(*model.Vector)
+					s.res1_data[i].AppendVectorValue(val.GetVectorValue(0))
+				}
 			}
+			// fmt.Println(s.res1_data)
 
 		} else if sym == "msg2" {
 			s.msg2_total += 1
-			AssertEqual(ret.Size(), 4)
 			for i := 0; i < len(s.coltype2); i++ {
-				AssertEqual(ret.GetValue(i).GetDataType(), s.coltype2[i])
-				// fmt.Println(ret.GetValue(i).GetDataType(), ex_types2[i])
-				val := ret.GetValue(i).(*model.Scalar).Value()
-				dt, err := model.NewDataType(s.coltype2[i], val)
-				AssertNil(err)
-				AssertNil(s.res2_data[i].Append(dt))
+				if i != 3 {
+					val := ret.GetValue(i).(*model.Scalar).DataType
+					s.res2_data[i].Append(val)
+				} else {
+					val := ret.GetValue(i).(*model.Vector)
+					s.res2_data[i].AppendVectorValue(val.GetVectorValue(0))
+				}
 			}
 		}
 		s.lock.Unlock()
 	}
 }
 
 func createStreamDeserializer(conn api.DolphinDB, tbname string) (sdHandler, sdB
 	tableInsert(t, 2012.01.01T01:21:23 + 1..n, 2018.12.01T01:21:23.000 + 1..n, take("a1""b1""c1",n), rand(100,n)+rand(1.0, n));
 	dbpath="dfs://test_dfs";if(existsDatabase(dbpath)){dropDatabase(dbpath)};db=database(dbpath, VALUE, "a1""b1""c1");
 	db.createPartitionedTable(t,"table2","sym").append!(t);
-	t2 = select * from loadTable(dbpath,"table2");share t2 as table2;
+	tmp2 = select * from loadTable(dbpath,"table2");share tmp2 as table2;
 	d = dict(['msg1','msg2'], [table1, table2]);
 	replay(inputTables=d, outputTables="` + tbname + `", dateColumn="timestampv", timeColumn="timestampv")`)
 	AssertNil(err)
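+	// the replayed dict keys 'msg1'/'msg2' tag each row with its source table;
+	// StreamDeserializer.Parse relies on these tags (via GetSym) to route rows
+	// back to the matching schema in the handlers above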