Skip to content

Commit

Permalink
Add ci for standalone compile and ut. (vesoft-inc#550)
Browse files Browse the repository at this point in the history
<!--
Thanks for your contribution!
In order to review the PR more efficiently, please add information according to the template.
-->

#### What type of PR is this?
- [x] bug
- [ ] feature
- [ ] enhancement

#### What problem(s) does this PR solve?
Issue(s) number: 

Description:
Add a workflow to check standalone compilation and unit tests. PYTEST and TCK support will come later.


#### How do you solve it?


  
#### Special notes for your reviewer, ex. impact of this fix, design document, etc:



#### Checklist:
Tests:
- [ ] Unit test(positive and negative cases)
- [ ] Function test
- [ ] Performance test
- [ ] N/A

Affects:
- [ ] Documentation affected (Please add the label if documentation needs to be modified.)
- [ ] Incompatibility (If it breaks the compatibility, please describe it and add the label.)
- [ ] If it's needed to cherry-pick (If cherry-pick to some branches is required, please label the destination version(s).)
- [ ] Performance impacted: Consumes more CPU/Memory


#### Release notes:

Please confirm whether to be reflected in release notes and how to describe:
> ex. Fixed the bug .....


Migrated from vesoft-inc#3637

Co-authored-by: Alex Xing <[email protected]>
  • Loading branch information
nebula-bots and SuperYoko authored Jan 27, 2022
1 parent 01377c9 commit da3f3ac
Show file tree
Hide file tree
Showing 15 changed files with 450 additions and 28 deletions.
96 changes: 96 additions & 0 deletions .github/workflows/pull_request.yml
Original file line number Diff line number Diff line change
Expand Up @@ -206,3 +206,99 @@ jobs:
with:
name: ${{ matrix.os }}-${{ matrix.compiler }}-nebula-test-logs
path: ./build/server_*/logs/

standalone:
name: standalone-build
needs: lint
runs-on: [self-hosted, nebula]
strategy:
fail-fast: false
matrix:
os:
- centos7
compiler:
- gcc-9.3
container:
image: reg.vesoft-inc.com/vesoft/nebula-dev:${{ matrix.os }}
credentials:
username: ${{ secrets.HARBOR_USERNAME }}
password: ${{ secrets.HARBOR_PASSWORD }}
env:
CCACHE_DIR: /tmp/ccache/nebula/${{ matrix.os }}-${{ matrix.compiler }}
CCACHE_MAXSIZE: 8G
volumes:
- /tmp/ccache/nebula/${{ matrix.os }}-${{ matrix.compiler }}:/tmp/ccache/nebula/${{ matrix.os }}-${{ matrix.compiler }}
options: --cap-add=SYS_PTRACE
steps:
- uses: webiny/[email protected]
with:
run: sh -c "find . -mindepth 1 -delete"
- uses: actions/checkout@v2
- name: Prepare environment
id: prepare
run: |
[ -d build/ ] && rm -rf build/* || mkdir -p build
make init -C tests
- name: Decrypt License
run: sh ./.github/workflows/decrypt_secret.sh
env:
SECRET_PASSPHRASE: ${{ secrets.LICENSE_PASSWORD }}
- name: CMake
id: cmake
run: |
case ${{ matrix.compiler }} in
gcc-*)
case ${{ matrix.os }} in
centos7)
# build with Release type
cmake \
-DCMAKE_CXX_COMPILER=$TOOLSET_GCC_DIR/bin/g++ \
-DCMAKE_C_COMPILER=$TOOLSET_GCC_DIR/bin/gcc \
-DCMAKE_BUILD_TYPE=Release \
-DENABLE_TESTING=on \
-DENABLE_STANDALONE_VERSION=on \
-GNinja \
-B build
echo "::set-output name=j::10"
;;
esac
;;
esac
- name: Make
run: |
ccache -z
ninja -j $(nproc)
ccache -s
working-directory: build/
- name: CTest
env:
ASAN_OPTIONS: fast_unwind_on_malloc=1
run: ctest -j $(($(nproc)/2+1)) --timeout 400 --output-on-failure
working-directory: build/
timeout-minutes: 20
- name: Setup Cluster
run: |
make standalone-up
working-directory: tests/
timeout-minutes: 60
- name: TCK
run: |
make RM_DIR=false DEBUG=false J=${{ steps.cmake.outputs.j }} standalone-tck
working-directory: tests/
timeout-minutes: 60
- name: LDBC
run: |
make RM_DIR=false DEBUG=false J=${{ steps.cmake.outputs.j }} ldbc
working-directory: tests/
timeout-minutes: 60
- name: Down cluster
run: |
make RM_DIR=false down
working-directory: tests/
timeout-minutes: 2
- name: Upload logs
uses: actions/upload-artifact@v2
if: ${{ failure() }}
with:
name: ${{ matrix.os }}-${{ matrix.compiler }}-nebula-test-logs
path: ./build/server_*/logs/
1 change: 1 addition & 0 deletions conf/nebula-standalone.conf.default
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
--daemonize=true
# The file to host the process id
--pid_file=pids/nebula-standalone.pid
--license_path=share/resources/nebula.license
# Whether to enable optimizer
--enable_optimizer=true
# The default charset when a space is created
Expand Down
14 changes: 8 additions & 6 deletions src/clients/storage/StorageClientBase-inl.h
Original file line number Diff line number Diff line change
Expand Up @@ -240,16 +240,18 @@ void StorageClientBase<ClientType, ClientManagerType>::getResponseImpl(
DCHECK(!!ioThreadPool_);
evb = ioThreadPool_->getEventBase();
}
auto reqPtr = std::make_shared<std::pair<HostAddr, Request>>(std::move(request.first),
std::move(request.second));
folly::via(
evb,
[evb, request = std::move(request), remoteFunc = std::move(remoteFunc), pro, this]() mutable {
auto host = request.first;
[evb, request = std::move(reqPtr), remoteFunc = std::move(remoteFunc), pro, this]() mutable {
auto host = request->first;
auto client = clientsMan_->client(host, evb, false, FLAGS_storage_client_timeout_ms);
auto spaceId = request.second.get_space_id();
auto partsId = getReqPartsId(request.second);
remoteFunc(client.get(), request.second)
auto spaceId = request->second.get_space_id();
auto partsId = getReqPartsId(request->second);
remoteFunc(client.get(), request->second)
.via(evb)
.thenValue([spaceId, pro, this](Response&& resp) mutable {
.thenValue([spaceId, pro, request, this](Response&& resp) mutable {
auto& result = resp.get_result();
for (auto& code : result.get_failed_parts()) {
VLOG(3) << "Failure! Failed part " << code.get_part_id() << ", failed code "
Expand Down
2 changes: 2 additions & 0 deletions src/daemons/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,7 @@ nebula_add_executable(
$<TARGET_OBJECTS:charset_obj>
$<TARGET_OBJECTS:graph_obj>
$<TARGET_OBJECTS:ft_es_graph_adapter_obj>
$<TARGET_OBJECTS:audit_obj>
$<TARGET_OBJECTS:storage_thrift_obj>
$<TARGET_OBJECTS:storage_server>
$<TARGET_OBJECTS:internal_storage_service_handler>
Expand All @@ -288,6 +289,7 @@ nebula_add_executable(
$<TARGET_OBJECTS:meta_http_handler>
$<TARGET_OBJECTS:meta_version_man_obj>
$<TARGET_OBJECTS:meta_data_upgrade_obj>
$<TARGET_OBJECTS:meta_v2_thrift_obj>
${storage_meta_deps}
${common_deps}
LIBRARIES
Expand Down
55 changes: 50 additions & 5 deletions src/daemons/StandAloneDaemon.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
#include "MetaDaemonInit.h"
#include "common/base/Base.h"
#include "common/base/SignalHandler.h"
#include "common/encryption/License.h"
#include "common/fs/FileUtils.h"
#include "common/hdfs/HdfsCommandHelper.h"
#include "common/network/NetworkUtils.h"
Expand Down Expand Up @@ -46,6 +47,7 @@ using nebula::HostAddr;
using nebula::ProcessUtils;
using nebula::Status;
using nebula::StatusOr;
using nebula::encryption::License;
using nebula::network::NetworkUtils;

void setupThreadManager();
Expand All @@ -68,6 +70,12 @@ DECLARE_string(flagfile);
DECLARE_bool(containerized);
DECLARE_bool(reuse_port);
DECLARE_string(meta_server_addrs);
DECLARE_string(license_path);
DEFINE_string(meta_sync_listener,
"",
"It is a list of IPs split by comma, used in cluster deployment"
"the ips number is equal to the replica number."
"If empty, it means it's a single node");

// storage gflags
DEFINE_string(data_path,
Expand All @@ -78,10 +86,7 @@ DEFINE_string(wal_path,
"",
"Nebula wal path. By default, wal will be stored as a sibling of "
"rocksdb data.");
DEFINE_string(listener_path,
"",
"Path for listener, only wal will be saved."
"if it is not empty, data_path will not take effect.");
DECLARE_string(listener_path);
DECLARE_int32(storage_port);

// meta gflags
Expand Down Expand Up @@ -163,6 +168,13 @@ int main(int argc, char *argv[]) {
return EXIT_FAILURE;
}

// load the time zone data
status = nebula::time::Timezone::init();
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}

// Initialize the global timezone, it's only used for datetime type compute
// won't affect the process timezone.
status = nebula::time::Timezone::initializeGlobalTimezone();
Expand Down Expand Up @@ -196,7 +208,40 @@ int main(int argc, char *argv[]) {
LOG(ERROR) << "Can't get peers address, status:" << peersRet.status();
return;
}
gMetaKVStore = initKV(peersRet.value(), metaLocalhost);

// License validation
LOG(INFO) << "-----------------Validate license-----------------\n";
LOG(INFO) << "License path: " << FLAGS_license_path;
auto licensePath = FLAGS_license_path;

status = License::validateLicense(licensePath);
if (!status.ok()) {
LOG(ERROR) << status;
return;
}

// Save license content
auto license = License::getInstance();
std::string contentStr = "";
License::parseLicenseContent(FLAGS_license_path, contentStr);
license->content = folly::parseJson(contentStr);

nebula::HostAddr syncListener("", 0);
if (!FLAGS_meta_sync_listener.empty()) {
auto syncListenerRet = nebula::network::NetworkUtils::toHosts(FLAGS_meta_sync_listener);
if (!syncListenerRet.ok()) {
LOG(ERROR) << "Can't get meta listener address, status:" << syncListenerRet.status();
return;
}
auto syncHost = syncListenerRet.value();
if (syncHost.size() != 1) {
LOG(ERROR) << "The meta sync listener address is illegal: " << FLAGS_meta_sync_listener;
return;
}
syncListener = syncHost[0];
}

gMetaKVStore = initKV(peersRet.value(), syncListener, metaLocalhost);
if (gMetaKVStore == nullptr) {
LOG(ERROR) << "Init kv failed!";
return;
Expand Down
3 changes: 2 additions & 1 deletion src/drainer/test/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ set(drainer_test_deps
$<TARGET_OBJECTS:geo_index_obj>
)

if (NOT ENABLE_STANDALONE_VERSION)

nebula_add_test(
NAME
Expand Down Expand Up @@ -98,7 +99,6 @@ nebula_add_test(
gtest
)


nebula_add_test(
NAME
drainer_task_test
Expand All @@ -114,3 +114,4 @@ nebula_add_test(
wangle
gtest
)
endif()
1 change: 0 additions & 1 deletion src/graph/executor/test/StorageServerStub.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ std::shared_ptr<GraphStorageLocalServer> instance_ = nullptr;

void GraphStorageLocalServer::setThreadManager(
std::shared_ptr<apache::thrift::concurrency::ThreadManager> threadManager) {
// lock?
threadManager_ = threadManager;
}

Expand Down
1 change: 0 additions & 1 deletion src/storage/GraphStorageLocalServer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@ std::shared_ptr<GraphStorageLocalServer> instance_ = nullptr;

void GraphStorageLocalServer::setThreadManager(
std::shared_ptr<apache::thrift::concurrency::ThreadManager> threadManager) {
// lock?
threadManager_ = threadManager;
}

Expand Down
2 changes: 2 additions & 0 deletions src/storage/stats/StorageStats.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,10 @@ void initStorageStats() {
kNumTagsDeleted = stats::StatsManager::registerStats("num_tags_deleted", "rate, sum");
kNumVerticesDeleted = stats::StatsManager::registerStats("num_vertices_deleted", "rate, sum");

#ifndef BUILD_STANDALONE
initMetaClientStats();
initKVStats();
#endif
}

} // namespace nebula
19 changes: 19 additions & 0 deletions tests/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,10 @@ TEST_AUDIT ?= false
gherkin_fmt = ~/.local/bin/reformat-gherkin
run_test = PYTHONPATH=$$PYTHONPATH:$(CURR_DIR)/.. $(CURR_DIR)/nebula-test-run.py
test_without_skip = python3 -m pytest -m "not skip"
test_without_skip_sa = python3 -m pytest -m "not skip and not distonly"
test_j = $(test_without_skip) -n$(J)
test_j_sa = $(test_without_skip_sa) -n$(J)


install-deps:
pip3 install --user -U setuptools wheel -i $(PYPI_MIRROR)
Expand Down Expand Up @@ -74,6 +77,18 @@ up: clean
--ca_signed=$(CA_SIGNED) \
--containerized=$(CONTAINERIZED)

standalone-up: clean
@mkdir -p $(CURR_DIR)/.pytest
$(run_test) --cmd=start_standalone \
--build_dir=$(BUILD_DIR) \
--debug=$(DEBUG) \
--multi_graphd=false \
--enable_ssl=$(ENABLE_SSL) \
--enable_graph_ssl=$(ENABLE_GRAPH_SSL) \
--enable_meta_ssl=$(ENABLE_META_SSL) \
--ca_signed=$(CA_SIGNED) \
--containerized=$(CONTAINERIZED)

down:
$(run_test) --cmd=stop --rm_dir=$(RM_DIR)

Expand All @@ -93,6 +108,9 @@ slow-query: currdir
$(test_j) tck/steps/test_kill_slow_query_via_same_service.py && \
$(test_j) tck/steps/test_kill_slow_query_via_different_service.py

standalone-tck: jobs
$(test_j_sa) tck/steps/test_tck.py

tck: jobs slow-query
$(test_j) tck/steps/test_tck.py
ifeq ($(TEST_AUDIT),true)
Expand All @@ -103,6 +121,7 @@ ldbc: currdir
$(test_j) tck/steps/test_ldbc.py

test-all: test tck ldbc
test-standalone-all: standalone-tck ldbc

fail: currdir
python3 -m pytest \
Expand Down
Loading

0 comments on commit da3f3ac

Please sign in to comment.