Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refactor plugin option #45

Merged
merged 1 commit into from
Jul 26, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@ all: build
pairs := darwin/amd64 linux/amd64 linux/arm64
GOPATH ?= ~/go
export GO111MODULE=on
VERSION ?= v1.1.0
K6_VERSION ?= v0.43.0

fmt:
Expand All @@ -15,7 +14,8 @@ lint :

build:
go install github.com/k6io/xk6/cmd/[email protected]
$(GOPATH)/bin/xk6 build $(K6_VERSION) --with github.com/vesoft-inc/k6-plugin@$(VERSION);
version=$(git describe --tags `git rev-list --tags --max-count=1`) \
$(GOPATH)/bin/xk6 build $(K6_VERSION) --with github.com/vesoft-inc/k6-plugin@$(version);

build-all: build-arm-v7

Expand Down
183 changes: 111 additions & 72 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ Used to test [NebulaGraph](https://github.com/vesoft-inc/nebula).

## Version match

k6-plugin now support NebulaGraph above v2.5.0.
k6-plugin now supports NebulaGraph v3.0.0 and above.

## Build

Expand All @@ -32,6 +32,13 @@ Then:
2. Build the binary:

```bash
# build with the latest version.
make

# build with local source code
make build-dev

# or build with specified version
xk6 build --with github.com/vesoft-inc/k6-plugin@{version}
# e.g. build v0.0.8
xk6 build --with github.com/vesoft-inc/[email protected]
Expand All @@ -45,41 +52,46 @@ xk6 build --with github.com/vesoft-inc/k6-plugin@master
import nebulaPool from 'k6/x/nebulagraph';
import { check } from 'k6';
import { Trend } from 'k6/metrics';
import { sleep } from 'k6';

var latencyTrend = new Trend('latency');
var responseTrend = new Trend('responseTime');
// initial nebula connect pool
// by default the channel buffer size is 20000, you can reset it with
// var pool = nebulaPool.initWithSize("192.168.8.152:9669", {poolSize}, {bufferSize}); e.g.
// var pool = nebulaPool.initWithSize("192.168.8.152:9669", 1000, 4000)
var pool = nebulaPool.init("192.168.8.152:9669", 400);
var latencyTrend = new Trend('latency', true);
var responseTrend = new Trend('responseTime', true);

// option configuration; please refer to this doc for more details.
var graph_option = {
address: "192.168.8.6:10010",
space: "sf1",
csv_path: "person.csv",
csv_delimiter: "|",
csv_with_header: true
};

nebulaPool.setOption(graph_option);
var pool = nebulaPool.init();
// initial session for every vu
var session = pool.getSession("root", "nebula")
session.execute("USE sf1")
var session = pool.getSession()

String.prototype.format = function () {
var formatted = this;
var data = arguments[0]

export function setup() {
// config csv file
pool.configCSV("person.csv", "|", false)
// config output file, save every query information
pool.configOutput("output.csv")
sleep(1)
}
formatted = formatted.replace(/\{(\d+)\}/g, function (match, key) {
return data[key]
})
return formatted
};

export default function (data) {
// get csv data from csv file
let d = session.getData()
// d[0] means the first column data in the csv file
let ngql = 'go 2 steps from ' + d[0] + ' over KNOWS '
// {0} means the first column data in the csv file
let ngql = 'go 2 steps from {0} over KNOWS'.format(d)
let response = session.execute(ngql)
check(response, {
"IsSucceed": (r) => r.isSucceed() === true
});
// add trend
latencyTrend.add(response.getLatency());
responseTrend.add(response.getResponseTime());
latencyTrend.add(response.getLatency()/1000);
responseTrend.add(response.getResponseTime()/1000);
};

export function teardown() {
Expand All @@ -95,45 +107,39 @@ export function teardown() {
# -d means the duration that test running, e.g. `3s` means 3 seconds, `5m` means 5 minutes.
>./k6 run nebula-test.js -u 3 -d 3s


/\ |‾‾| /‾‾/ /‾‾/
/\ / \ | |/ / / /
/ \/ \ | ( / ‾‾\
/ \ | |\ \ | (‾) |
/ __________ \ |__| \__\ \_____/ .io

INFO[0000] 2021/07/07 16:50:25 [INFO] begin init the nebula pool
INFO[0000] 2021/07/07 16:50:25 [INFO] connection pool is initialized successfully
INFO[0000] 2021/07/07 16:50:25 [INFO] finish init the pool
testing option: {"pool_policy":"connection","output":"output.csv","output_channel_size":10000,"address":"192.168.8.6:10010","timeout_us":0,"idletime_us":0,"max_size":400,"min_size":0,"username":"root","password":"nebula","space":"sf1","csv_path":"person.csv","csv_delimiter":"|","csv_with_header":true,"csv_channel_size":10000,"csv_data_limit":500000,"retry_times":0,"retry_interval_us":0,"retry_timeout_us":0,"ssl_ca_pem_path":"","ssl_client_pem_path":"","ssl_client_key_path":""}
execution: local
script: nebula-test.js
output: -
output: engine

scenarios: (100.00%) 1 scenario, 3 max VUs, 33s max duration (incl. graceful stop):
* default: 3 looping VUs for 3s (gracefulStop: 30s)

INFO[0004] 2021/07/07 16:50:29 [INFO] begin close the nebula pool

running (04.1s), 0/3 VUs, 570 complete and 0 interrupted iterations
default ✓ [======================================] 3 VUs 3s
INFO[0004] 2021/07/07 16:50:29 [INFO] begin init the nebula pool
INFO[0004] 2021/07/07 16:50:29 [INFO] connection pool is initialized successfully
INFO[0004] 2021/07/07 16:50:29 [INFO] finish init the pool

✓ IsSucceed

█ setup

█ teardown

checks...............: 100.00% ✓ 570 ✗ 0
checks...............: 100.00% ✓ 3529 ✗ 0
data_received........: 0 B 0 B/s
data_sent............: 0 B 0 B/s
iteration_duration...: avg=17.5ms min=356.6µs med=11.44ms max=1s p(90)=29.35ms p(95)=38.73ms
iterations...........: 570 139.877575/s
latency..............: avg=2986.831579 min=995 med=2663 max=18347 p(90)=4518.4 p(95)=5803
responseTime.........: avg=15670.263158 min=4144 med=11326.5 max=108286 p(90)=28928.9 p(95)=38367.1
vus..................: 3 min=0 max=3
vus_max..............: 3 min=3 max=3
iteration_duration...: avg=2.54ms min=129.28µs med=1.78ms max=34.99ms p(90)=5.34ms p(95)=6.79ms
iterations...........: 3529 1174.135729/s
latency..............: avg=1.98ms min=439µs med=1.42ms max=27.77ms p(90)=4.11ms p(95)=5.12ms
responseTime.........: avg=2.48ms min=495µs med=1.72ms max=34.93ms p(90)=5.27ms p(95)=6.71ms
vus..................: 3 min=3 max=3
vus_max..............: 3 min=3 max=3


running (03.0s), 0/3 VUs, 3529 complete and 0 interrupted iterations
default ✓ [======================================] 3 VUs 3s
```

* `checks`, one check per iteration, verify `isSucceed` by default.
Expand All @@ -154,38 +160,71 @@ The `output.csv` saves data as below:
```bash
>head output.csv

timestamp,nGQL,latency,responseTime,isSucceed,rows,errorMsg
1625647825,USE sf1,7808,10775,true,0,
1625647825,USE sf1,4055,7725,true,0,
1625647825,USE sf1,3431,10231,true,0,
1625647825,USE sf1,2938,5600,true,0,
1625647825,USE sf1,2917,5410,true,0,
1625647826,go 2 steps from 933 over KNOWS ,6022,24537,true,1680,
1625647826,go 2 steps from 1129 over KNOWS ,6141,25861,true,1945,
1625647826,go 2 steps from 4194 over KNOWS ,6317,26309,true,1581,
1625647826,go 2 steps from 8698 over KNOWS ,4388,22597,true,1530,
```

## Advanced usage

By default, all vus use the same channel to read the csv data.

You can change the strategy before `getSession` function.

As each vu uses a separate channel, you can reduce channel buffer size to save memory.

```js
// initial nebula connect pool, channel buffer size is 4000
var pool = nebulaPool.initWithSize("192.168.8.61:9669", 400, 4000);

// set csv strategy, 1 means each vu has a separate csv reader.
pool.configCsvStrategy(1)

// initial session for every vu
var session = pool.getSession("root", "nebula")
timestamp,nGQL,latency,responseTime,isSucceed,rows,firstRecord,errorMsg
1689576531,go 2 steps from 4194 over KNOWS yield dst(edge),4260,5151,true,1581,32985348838665,
1689576531,go 2 steps from 8333 over KNOWS yield dst(edge),4772,5772,true,2063,32985348833536,
1689576531,go 2 steps from 1129 over KNOWS yield dst(edge),5471,6441,true,1945,19791209302529,
1689576531,go 2 steps from 8698 over KNOWS yield dst(edge),3453,4143,true,1530,28587302322946,
1689576531,go 2 steps from 8853 over KNOWS yield dst(edge),4361,5368,true,2516,28587302324992,
1689576531,go 2 steps from 2199023256684 over KNOWS yield dst(edge),2259,2762,true,967,32985348833796,
1689576531,go 2 steps from 2199023262818 over KNOWS yield dst(edge),638,732,true,0,,
1689576531,go 2 steps from 10027 over KNOWS yield dst(edge),5182,6701,true,3288,30786325580290,
1689576531,go 2 steps from 2199023261211 over KNOWS yield dst(edge),2131,2498,true,739,32985348833794,
```

Please refer to [nebula-test-insert.js](./example/nebula-test-insert.js) for more details.
## Plugin Option

Pool options

---
| Key | Type | Default | Description |
|---|---|---|---|
|pool_policy|string|connection|which pool to use for testing: 'connection' or 'session'|
|address |string||NebulaGraph address, e.g. '192.168.8.6:9669,192.168.8.7:9669'|
|timeout_us|int|0|client connection timeout, 0 means no timeout|
|idletime_us|int|0|client connection idle timeout, 0 means no timeout|
|max_size|int|400|max client connections in pool|
|min_size|int|0|min client connections in pool|
|username|string|root|NebulaGraph username|
|password|string|nebula|NebulaGraph password|
|space|string||NebulaGraph space|

Output options

---
| Key | Type | Default | Description |
|---|---|---|---|
|output|string||output file path|
|output_channel_size|int|10000| size of output channel|

CSV options

---
| Key | Type | Default | Description |
|---|---|---|---|
|csv_path|string||csv file path|
|csv_delimiter|string|,|delimiter of csv file|
|csv_with_header|bool|false|if true, the first record (header row) is ignored|
|csv_channel_size|int|10000|size of csv reader channel|
|csv_data_limit|int|500000|would load [x] rows in memory, and then send to channel in loop|

Retry options

---
| Key | Type | Default | Description |
|---|---|---|---|
|retry_times|int|0|max retry times|
|retry_interval_us|int|0|interval duration for next retry|
|retry_timeout_us|int|0|retry timeout|

SSL options

---
| Key | Type | Default | Description |
|---|---|---|---|
|ssl_ca_pem_path|string||if it is not blank, would use SSL connection. ca pem path|
|ssl_client_pem_path|string||client pem path|
|ssl_client_key_path|string||client key path|

## Batch insert

Expand Down
57 changes: 27 additions & 30 deletions example/nebula-test-insert-limit-rate.js
Original file line number Diff line number Diff line change
Expand Up @@ -7,19 +7,23 @@
import nebulaPool from 'k6/x/nebulagraph';
import { check } from 'k6';
import { Trend } from 'k6/metrics';
import { sleep } from 'k6';

var latencyTrend = new Trend('latency');
var responseTrend = new Trend('responseTime');
// initial nebula connect pool
var pool = nebulaPool.initWithSize("192.168.8.61:9669,192.168.8.62:9669,192.168.8.63:9669", 400, 4000);
var latencyTrend = new Trend('latency', true);
var responseTrend = new Trend('responseTime', true);

// set csv strategy, 1 means each vu has a separate csv reader.
pool.configCsvStrategy(1)
var graph_option = {
address: "192.168.8.6:10010",
space: "sf1",
csv_path: "person.csv",
csv_delimiter: "|",
csv_with_header: true,
output: "output.csv"
};

nebulaPool.setOption(graph_option);
var pool = nebulaPool.init();
// initial session for every vu
var session = pool.getSession("root", "nebula")
session.execute("USE ldbc")
var session = pool.getSession()

// concurrent 300, and each second, 1000 iterations would be made.
export const options = {
Expand All @@ -35,32 +39,27 @@ export const options = {
},
};

export function setup() {
// config csv file
pool.configCSV("person.csv", "|", false)
// config output file, save every query information
pool.configOutput("output.csv")
sleep(1)
}
String.prototype.format = function() {
var formatted = this;
var data = arguments[0]

formatted = formatted.replace(/\{(\d+)\}/g, function(match, key) {
return data[key]
})
return formatted
};

export default function (data) {
export default function(data) {
// get csv data from csv file
let ngql = 'INSERT VERTEX Person(firstName, lastName, gender, birthday, creationDate, locationIP, browserUsed) VALUES '
let batches = []
let batchSize = 1
// batch size
let batchSize = 10
for (let i = 0; i < batchSize; i++) {
let d = session.getData();
let values = []
// concat the insert value
for (let index = 1; index < 8; index++) {
let value = '"' + d[index] + '"'
values.push(value)
}
let batch = d[0] + ":(" + values.join(",") + ")"
batches.push(batch)
let value = "{0}:(\"{1}\",\"{2}\", \"{3}\", \"{4}\", datetime(\"{5}\"), \"{6}\", \"{7}\")".format(d)
batches.push(value)
}
ngql = ngql + batches.join(',')
ngql = ngql + " " + batches.join(',')
let response = session.execute(ngql)
check(response, {
"IsSucceed": (r) => r.isSucceed() === true
Expand All @@ -74,5 +73,3 @@ export default function (data) {
export function teardown() {
pool.close()
}


Loading