From dddf54e6baa0e92e0ca8ca7e4766ff38eefe1446 Mon Sep 17 00:00:00 2001
From: Yee <2520865+yixinglu@users.noreply.github.com>
Date: Sun, 26 Sep 2021 07:08:40 +0800
Subject: [PATCH] Add docker major version tag when releasing (#2921)
---
 .github/actions/tagname-action/action.yml |  4 +++-
 .github/workflows/nightly.yml             |  2 +-
 .github/workflows/release.yml             | 13 ++++++++++++-
 README-CN.md                              |  4 ++--
 README.md                                 |  4 ++--
 tests/tck/conftest.py                     |  7 +++++++
 6 files changed, 27 insertions(+), 7 deletions(-)

diff --git a/.github/actions/tagname-action/action.yml b/.github/actions/tagname-action/action.yml
index 81c8b1011eb..8132c96f470 100644
--- a/.github/actions/tagname-action/action.yml
+++ b/.github/actions/tagname-action/action.yml
@@ -13,7 +13,9 @@ runs:
- id: tag
run: |
tag=$(echo ${{ github.ref }} | rev | cut -d/ -f1 | rev)
- tagnum=$(echo $tag |sed 's/^v//')
+ tagnum=$(echo $tag | sed 's/^v//')
+ majorver=$(echo $tag | cut -d '.' -f 1)
echo "::set-output name=tag::$tag"
echo "::set-output name=tagnum::$tagnum"
+ echo "::set-output name=majorver::$majorver"
shell: bash
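
For context, the new majorver output is simply the leading dot-separated component of the release tag, so a tag of the form v3.0.0 (a made-up example, not part of this patch) yields v3. A minimal sketch of the derivation, runnable outside of CI:

    tag="v3.0.0"                               # assumed example release tag
    tagnum=$(echo "$tag" | sed 's/^v//')       # -> 3.0.0
    majorver=$(echo "$tag" | cut -d '.' -f 1)  # -> v3
    echo "$tag $tagnum $majorver"
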
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index cdcd55fd93d..0bb60d43d0f 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -39,7 +39,7 @@ jobs:
echo "::set-output name=subdir::$subdir"
- uses: actions/upload-artifact@v1
with:
- name: ${{ matrix.os }}-v2-nightly
+ name: ${{ matrix.os }}-nightly
path: pkg-build/cpack_output
- uses: ./.github/actions/upload-to-oss-action
with:
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 0456901f7dd..962e76d8330 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -62,6 +62,14 @@ jobs:
- uses: actions/checkout@v2
- uses: ./.github/actions/tagname-action
id: tagname
+ - id: docker
+ run: |
+ majorver=$(git tag -l --sort=v:refname | tail -n1 | cut -f1 -d'.')
+ tag=""
+ if [[ $majorver == ${{ steps.tagname.outputs.majorver }} ]]; then
+ tag="vesoft/nebula-${{ matrix.service }}:latest"
+ fi
+ echo "::set-output name=tag::$tag"
- uses: docker/setup-buildx-action@v1
- uses: docker/login-action@v1
with:
@@ -71,7 +79,10 @@ jobs:
with:
context: .
file: ./docker/Dockerfile.${{ matrix.service }}
- tags: vesoft/nebula-${{ matrix.service }}:${{ steps.tagname.outputs.tag }},vesoft/nebula-${{ matrix.service }}:latest
+ tags: |
+ vesoft/nebula-${{ matrix.service }}:${{ steps.tagname.outputs.tag }}
+ vesoft/nebula-${{ matrix.service }}:${{ steps.tagname.outputs.majorver }}
+ ${{ steps.docker.outputs.tag }}
push: true
build-args: |
BRANCH=${{ steps.tagname.outputs.tag }}
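
The docker step above gates the latest tag: it takes the highest existing git tag, extracts its major version, and only appends vesoft/nebula-<service>:latest to the tag list when the release being built belongs to that newest major line. A rough sketch of the same check with made-up versions (v2.6.0 and v3.0.1 already tagged, v2.6.1 being released):

    newest_major=$(git tag -l --sort=v:refname | tail -n1 | cut -f1 -d'.')  # -> v3
    release_major="v2"      # majorver output of tagname-action for v2.6.1
    if [[ "$newest_major" == "$release_major" ]]; then
        echo "push :latest as well"
    else
        echo "skip :latest, this is a maintenance release on an older major"
    fi

This keeps a patch release on an old major line (v2.6.1 in this sketch) from overwriting :latest once v3.x has shipped.
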
diff --git a/README-CN.md b/README-CN.md
index a86cf921a5c..691ae03e97e 100644
--- a/README-CN.md
+++ b/README-CN.md
@@ -1,5 +1,5 @@
-
+
中文 | English
世界上唯一能够容纳千亿个顶点和万亿条边,并提供毫秒级查询延时的图数据库解决方案
@@ -34,7 +34,7 @@
## 发布通告
-v1.x和v2.5.0之后的版本,Nebula Graph在这个repo管理。如需获取v2.0.0到v2.5.0之间的版本,请访问[Nebula Graph repo](https://github.com/vesoft-inc/nebula-graph)。
+v1.x和v2.5.1之后的版本,Nebula Graph在这个repo管理。如需获取v2.0.0到v2.5.1之间的版本,请访问[Nebula Graph repo](https://github.com/vesoft-inc/nebula-graph)。
Nebula Graph 1.x 后续不再进行功能的更新,请升级到2.0版本中。Nebula Graph内核 1.x 与 2.x数据格式、通信协议、客户端等均双向不兼容,可参照[升级指导](https://docs.nebula-graph.com.cn/2.5.0/4.deployment-and-installation/3.upgrade-nebula-graph/upgrade-nebula-graph-to-250/)进行升级。
diff --git a/README.md b/README.md
index 1f9f13e5824..ea5c8e4f23e 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-
+
English | 中文
A distributed, scalable, lightning-fast graph database
@@ -31,7 +31,7 @@ Compared with other graph database solutions, **Nebula Graph** has the following
## Notice of Release
-This repository hosts the source code of Nebula Graph versions before 2.0.0-alpha and after v2.5.0. If you are looking to use the versions between v2.0.0 and v2.5.0, please head to [Nebula Graph repo](https://github.com/vesoft-inc/nebula-graph).
+This repository hosts the source code of Nebula Graph versions before 2.0.0-alpha and after v2.5.1. If you are looking to use the versions between v2.0.0 and v2.5.1, please head to [Nebula Graph repo](https://github.com/vesoft-inc/nebula-graph).
Nebula Graph 1.x is not actively maintained. Please move to Nebula Graph 2.x. The data format, rpc protocols, clients, etc. are not compatible between Nebula Graph v1.x and v2.x, but we do offer [upgrade guide from 1.x to v2.5.0](https://docs.nebula-graph.io/2.5.0/4.deployment-and-installation/3.upgrade-nebula-graph/upgrade-nebula-graph-to-250/).
diff --git a/tests/tck/conftest.py b/tests/tck/conftest.py
index 1a0abf392b3..ad245fe2957 100644
--- a/tests/tck/conftest.py
+++ b/tests/tck/conftest.py
@@ -39,6 +39,7 @@
register_dict = {}
register_lock = threading.Lock()
+
def normalize_outline_scenario(request, name):
for group in example_pattern.findall(name):
fixval = request.getfixturevalue(group)
@@ -167,6 +168,7 @@ def new_space(request, options, session, graph_spaces):
graph_spaces["space_desc"] = space_desc
graph_spaces["drop_space"] = True
+
@given(parse("Any graph"))
def new_space(request, session, graph_spaces):
name = "EmptyGraph_" + space_generator()
@@ -182,6 +184,7 @@ def new_space(request, session, graph_spaces):
graph_spaces["space_desc"] = space_desc
graph_spaces["drop_space"] = True
+
@given(parse('load "{data}" csv data to a new space'))
def import_csv_data(request, data, graph_spaces, session, pytestconfig):
data_dir = os.path.join(DATA_DIR, normalize_outline_scenario(request, data))
@@ -477,6 +480,7 @@ def check_plan(plan, graph_spaces):
differ = PlanDiffer(resp.plan_desc(), expect)
assert differ.diff(), differ.err_msg()
+
@when(parse("executing query via graph {index:d}:\n{query}"))
def executing_query(query, index, graph_spaces, session_from_first_conn_pool, session_from_second_conn_pool, request):
assert index < 2, "There exists only 0,1 graph: {}".format(index)
@@ -486,12 +490,14 @@ def executing_query(query, index, graph_spaces, session_from_first_conn_pool, se
else:
exec_query(request, ngql, session_from_second_conn_pool, graph_spaces)
+
@then(parse("the result should be, the first {n:d} records in order, and register {column_name} as a list named {key}:\n{result}"))
def result_should_be_in_order_and_register_key(n, column_name, key, request, result, graph_spaces):
assert n > 0, f"The number of records should be a positive integer: {n}"
result_ds = cmp_dataset(request, graph_spaces, result, order=True, strict=True, contains=CmpType.CONTAINS, first_n_records=n)
register_result_key(request.node.name, result_ds, column_name, key)
+
def register_result_key(test_name, result_ds, column_name, key):
if column_name.encode() not in result_ds.column_names:
assert False, f"{column_name} not in result columns {result_ds.column_names}."
@@ -501,6 +507,7 @@ def register_result_key(test_name, result_ds, column_name, key):
register_dict[test_name + key] = val;
register_lock.release()
+
@when(parse("executing query, fill replace holders with element index of {indices} in {keys}:\n{query}"))
def executing_query_with_params(query, indices, keys, graph_spaces, session, request):
indices_list=[int(v) for v in indices.split(",")]