From c3fe937705efeb4c19189d41b442c8bae1c8dcab Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Oct 2023 01:53:29 +0000 Subject: [PATCH 01/72] chore(deps): bump urllib3 from 1.26.5 to 1.26.18 in /for_devs Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.5 to 1.26.18. - [Release notes](https://github.com/urllib3/urllib3/releases) - [Changelog](https://github.com/urllib3/urllib3/blob/main/CHANGES.rst) - [Commits](https://github.com/urllib3/urllib3/compare/1.26.5...1.26.18) --- updated-dependencies: - dependency-name: urllib3 dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- for_devs/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/for_devs/requirements.txt b/for_devs/requirements.txt index f1ab785a7d..1197a63bba 100644 --- a/for_devs/requirements.txt +++ b/for_devs/requirements.txt @@ -32,7 +32,7 @@ rlp==2.0.1 six==1.16.0 toolz==0.11.1 typing-extensions==3.10.0.0 -urllib3==1.26.5 +urllib3==1.26.18 varint==1.0.2 web3==5.20.0 websockets==8.1 From d617c7abf82e67a13cce18146fa8db8f77a3f758 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 14 Nov 2023 23:57:24 +0000 Subject: [PATCH 02/72] chore(deps): bump aiohttp from 3.8.5 to 3.8.6 in /for_devs Bumps [aiohttp](https://github.com/aio-libs/aiohttp) from 3.8.5 to 3.8.6. - [Release notes](https://github.com/aio-libs/aiohttp/releases) - [Changelog](https://github.com/aio-libs/aiohttp/blob/master/CHANGES.rst) - [Commits](https://github.com/aio-libs/aiohttp/compare/v3.8.5...v3.8.6) --- updated-dependencies: - dependency-name: aiohttp dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- for_devs/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/for_devs/requirements.txt b/for_devs/requirements.txt index cf2e2523cb..632ce7beb5 100644 --- a/for_devs/requirements.txt +++ b/for_devs/requirements.txt @@ -1,4 +1,4 @@ -aiohttp==3.8.5 +aiohttp==3.8.6 async-timeout==3.0.1 attrs==21.2.0 base58==2.1.0 From 5b3e834b095322cacec5402cfe0e806ca104876d Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 6 Nov 2023 20:57:46 +0100 Subject: [PATCH 03/72] chore: add support for new rocksDB --- CMakeModules/cpp_graphql_gen.cmake | 1 + CMakeModules/rocksDB_gen.cmake | 30 ++++++++++++++++++++++++++++++ conanfile.py | 3 --- libraries/core_libs/CMakeLists.txt | 3 ++- submodules/CMakeLists.txt | 4 ++-- submodules/taraxa-evm | 2 +- 6 files changed, 36 insertions(+), 7 deletions(-) create mode 100644 CMakeModules/rocksDB_gen.cmake diff --git a/CMakeModules/cpp_graphql_gen.cmake b/CMakeModules/cpp_graphql_gen.cmake index 74fbabdcf3..e4c9b04b35 100644 --- a/CMakeModules/cpp_graphql_gen.cmake +++ b/CMakeModules/cpp_graphql_gen.cmake @@ -7,6 +7,7 @@ FetchContent_Declare( cppgraphqlgen GIT_REPOSITORY https://github.com/microsoft/cppgraphqlgen.git GIT_TAG 1d659227bfc51fb7d9bb5dc9862234e7cfd1b1e3 # v4.5.4 + GIT_SHALLOW TRUE ) set(GRAPHQL_BUILD_TESTS OFF) set(GRAPHQL_UPDATE_VERSION OFF) diff --git a/CMakeModules/rocksDB_gen.cmake b/CMakeModules/rocksDB_gen.cmake new file mode 100644 index 0000000000..4ec1d878bd --- /dev/null +++ b/CMakeModules/rocksDB_gen.cmake @@ -0,0 +1,30 @@ +# ========================================================================== # +# RocksDB key-value store # +# ========================================================================== # +include(FetchContent) + +set(Boost_NO_WARN_NEW_VERSIONS 1) + +FetchContent_Declare( + rocksdb + GIT_REPOSITORY https://github.com/facebook/rocksdb + GIT_TAG v8.5.3 + GIT_SHALLOW TRUE +) + +FetchContent_GetProperties(rocksdb) + +message(STATUS "Populating 
rocksdb") +set(USE_RTTI 1) +set(WITH_LZ4 ON) +set(WITH_TESTS OFF CACHE INTERNAL "") +set(WITH_JNI OFF CACHE INTERNAL "") +set(WITH_TOOLS OFF CACHE INTERNAL "") +set(WITH_BENCHMARK_TOOLS OFF CACHE INTERNAL "") +set(WITH_CORE_TOOLS OFF CACHE INTERNAL "") +set(CMAKE_ENABLE_SHARED OFF CACHE INTERNAL "") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=unused-const-variable -Wno-error=pedantic -Wno-error=format-pedantic -Wno-error=gnu-zero-variadic-macro-arguments -Wno-error=extra-semi -Wc++98-compat-extra-semi -Wno-error=deprecated-copy -Wno-deprecated-copy -Wno-error=pessimizing-move -Wno-pessimizing-move -Wno-pessimizing-move") + +FetchContent_Populate(rocksdb) +add_subdirectory(${rocksdb_SOURCE_DIR} ${rocksdb_BINARY_DIR} EXCLUDE_FROM_ALL) + \ No newline at end of file diff --git a/conanfile.py b/conanfile.py index 49e27f5ef8..f5755b2ade 100644 --- a/conanfile.py +++ b/conanfile.py @@ -19,7 +19,6 @@ def requirements(self): self.requires("cryptopp/8.7.0") self.requires("gtest/1.14.0") self.requires("lz4/1.9.4") - self.requires("rocksdb/6.29.5") self.requires("prometheus-cpp/1.1.0") self.requires("jsoncpp/1.9.5") @@ -61,8 +60,6 @@ def configure(self): self.options["gtest"].build_gmock = False # this links cppcheck to prce library self.options["cppcheck"].have_rules = False - self.options["rocksdb"].use_rtti = True - self.options["rocksdb"].with_lz4 = True # mpir is required by cppcheck and it causing gmp confict self.options["mpir"].enable_gmpcompat = False diff --git a/libraries/core_libs/CMakeLists.txt b/libraries/core_libs/CMakeLists.txt index 1e0655003e..4309c6f927 100644 --- a/libraries/core_libs/CMakeLists.txt +++ b/libraries/core_libs/CMakeLists.txt @@ -10,6 +10,7 @@ file(GLOB_RECURSE NETWORK_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/network/*.cpp) include(${PROJECT_SOURCE_DIR}/CMakeModules/cpp_graphql_gen.cmake) file(GLOB_RECURSE NETWORK_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/network/*.h) +include(${PROJECT_SOURCE_DIR}/CMakeModules/rocksDB_gen.cmake) 
file(GLOB_RECURSE STORAGE_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/storage/*.hpp) file(GLOB_RECURSE STORAGE_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/storage/*.cpp) @@ -56,7 +57,7 @@ target_link_libraries(core_libs PUBLIC p2p metrics Jsonrpccpp - CONAN_PKG::rocksdb + rocksdb # GraphQL cppgraphqlgen::graphqlservice diff --git a/submodules/CMakeLists.txt b/submodules/CMakeLists.txt index 9f559d1a0c..48fae128a5 100644 --- a/submodules/CMakeLists.txt +++ b/submodules/CMakeLists.txt @@ -84,11 +84,11 @@ add_make_target(vrf libsodium.a "${VRF_AUTOTOOLS_CMD} && ${CMAKE_MAKE_PROGRAM} & # Add taraxa-evm set(EVM_BUILD_DIR ${BUILD_DIR_PREFIX}/taraxa-evm) ## add include of libs -set(EVM_BUILD_INCLUDE -I${CONAN_INCLUDE_DIRS_ROCKSDB}) +set(EVM_BUILD_INCLUDE -I${rocksdb_SOURCE_DIR}/include) ## set C flags set(EVM_BUILD_CGO_CFLAGS -O3 ${EVM_BUILD_INCLUDE}) ## add link of libs -set(EVM_BUILD_LD -L${CONAN_LIB_DIRS_ROCKSDB} -lrocksdb -L${CONAN_LIB_DIRS_LZ4} -llz4) +set(EVM_BUILD_LD -L${ROCKSDB_LIB} -lrocksdb -L${CONAN_LIB_DIRS_LZ4} -llz4) ## if we need full static build use flag if(TARAXA_STATIC_BUILD) if (NOT APPLE) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index b4f3ffd579..d654aabf76 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit b4f3ffd5791b3aefd738273eaccf073363a2b117 +Subproject commit d654aabf767f7cd24207c641031e5271c48dd123 From 04f8eb486fbb10a2648bbb14add85066d6ac5e23 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 6 Nov 2023 21:19:06 +0100 Subject: [PATCH 04/72] add missing dependency --- builder.Dockerfile | 1 + doc/building.md | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/builder.Dockerfile b/builder.Dockerfile index 1ecf5c8458..2cebc08e13 100644 --- a/builder.Dockerfile +++ b/builder.Dockerfile @@ -46,6 +46,7 @@ RUN add-apt-repository ppa:ethereum/ethereum \ binutils \ cmake \ ccache \ + libgflags-dev \ # this libs are required for arm build by go part libzstd-dev \ libsnappy-dev \ diff 
--git a/doc/building.md b/doc/building.md index ee9b3f8d1b..3eeda25ba0 100644 --- a/doc/building.md +++ b/doc/building.md @@ -24,6 +24,7 @@ will build out of the box without further effort: clang-tidy-14 \ golang-go \ python3-pip \ + libgflags-dev \ # this libs are required for arm build by go part. you can skip it for amd64 build libzstd-dev \ libsnappy-dev \ @@ -168,7 +169,7 @@ And optional: First you need to get (Brew)[https://brew.sh/] package manager. After that you need tot install dependencies with it. Clang-14 is used for compilation. brew update - brew install coreutils go autoconf automake gflags git libtool llvm@14 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr libmicrohttpd + brew install coreutils go autoconf automake gflags git libtool llvm@14 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr libmicrohttpd ### Clone the Repository From 61253e0b47c3f2b58a7c8251ef3e0fcc7b707e82 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 6 Nov 2023 21:30:20 +0100 Subject: [PATCH 05/72] Revert "add missing dependency" This reverts commit f74c65932aa51ff444d0379b0b9cd5b710b5d2b7. --- builder.Dockerfile | 1 - doc/building.md | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/builder.Dockerfile b/builder.Dockerfile index 2cebc08e13..1ecf5c8458 100644 --- a/builder.Dockerfile +++ b/builder.Dockerfile @@ -46,7 +46,6 @@ RUN add-apt-repository ppa:ethereum/ethereum \ binutils \ cmake \ ccache \ - libgflags-dev \ # this libs are required for arm build by go part libzstd-dev \ libsnappy-dev \ diff --git a/doc/building.md b/doc/building.md index 3eeda25ba0..ee9b3f8d1b 100644 --- a/doc/building.md +++ b/doc/building.md @@ -24,7 +24,6 @@ will build out of the box without further effort: clang-tidy-14 \ golang-go \ python3-pip \ - libgflags-dev \ # this libs are required for arm build by go part. 
you can skip it for amd64 build libzstd-dev \ libsnappy-dev \ @@ -169,7 +168,7 @@ And optional: First you need to get (Brew)[https://brew.sh/] package manager. After that you need tot install dependencies with it. Clang-14 is used for compilation. brew update - brew install coreutils go autoconf automake gflags git libtool llvm@14 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr libmicrohttpd + brew install coreutils go autoconf automake gflags git libtool llvm@14 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr libmicrohttpd ### Clone the Repository From 3033f9f2fffd0fc30c93a22a3d00035d7a168b11 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 6 Nov 2023 21:30:44 +0100 Subject: [PATCH 06/72] tune building --- CMakeModules/rocksDB_gen.cmake | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CMakeModules/rocksDB_gen.cmake b/CMakeModules/rocksDB_gen.cmake index 4ec1d878bd..92d69465d8 100644 --- a/CMakeModules/rocksDB_gen.cmake +++ b/CMakeModules/rocksDB_gen.cmake @@ -17,14 +17,16 @@ FetchContent_GetProperties(rocksdb) message(STATUS "Populating rocksdb") set(USE_RTTI 1) set(WITH_LZ4 ON) +set(WITH_GFLAGS OFF) +set(FAIL_ON_WARNINGS OFF) set(WITH_TESTS OFF CACHE INTERNAL "") set(WITH_JNI OFF CACHE INTERNAL "") set(WITH_TOOLS OFF CACHE INTERNAL "") set(WITH_BENCHMARK_TOOLS OFF CACHE INTERNAL "") set(WITH_CORE_TOOLS OFF CACHE INTERNAL "") +set(WITH_TRACE_TOOLS OFF CACHE INTERNAL "") set(CMAKE_ENABLE_SHARED OFF CACHE INTERNAL "") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=unused-const-variable -Wno-error=pedantic -Wno-error=format-pedantic -Wno-error=gnu-zero-variadic-macro-arguments -Wno-error=extra-semi -Wc++98-compat-extra-semi -Wno-error=deprecated-copy -Wno-deprecated-copy -Wno-error=pessimizing-move -Wno-pessimizing-move -Wno-pessimizing-move") FetchContent_Populate(rocksdb) add_subdirectory(${rocksdb_SOURCE_DIR} ${rocksdb_BINARY_DIR} EXCLUDE_FROM_ALL) - \ No newline at end of file From 
334c47c4d8f630abdba68d2ec214eea335932dbb Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 7 Nov 2023 08:07:25 +0100 Subject: [PATCH 07/72] fix cmake policy --- CMakeModules/rocksDB_gen.cmake | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CMakeModules/rocksDB_gen.cmake b/CMakeModules/rocksDB_gen.cmake index 92d69465d8..67e380ece6 100644 --- a/CMakeModules/rocksDB_gen.cmake +++ b/CMakeModules/rocksDB_gen.cmake @@ -3,7 +3,7 @@ # ========================================================================== # include(FetchContent) -set(Boost_NO_WARN_NEW_VERSIONS 1) +set(CMAKE_POLICY_DEFAULT_CMP0077 NEW) FetchContent_Declare( rocksdb @@ -30,3 +30,4 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=unused-const-variable -Wno-er FetchContent_Populate(rocksdb) add_subdirectory(${rocksdb_SOURCE_DIR} ${rocksdb_BINARY_DIR} EXCLUDE_FROM_ALL) +include_directories("${rocksdb_SOURCE_DIR}/include") From 992a30e3ed57e585b3046ca270a0679cbda5bcbc Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 7 Nov 2023 09:36:01 +0100 Subject: [PATCH 08/72] anotehr try --- CMakeLists.txt | 3 +++ CMakeModules/{rocksDB_gen.cmake => rocksdb.cmake} | 0 libraries/core_libs/CMakeLists.txt | 1 - 3 files changed, 3 insertions(+), 1 deletion(-) rename CMakeModules/{rocksDB_gen.cmake => rocksdb.cmake} (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6d3042c8fc..bfcd09a7aa 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -216,6 +216,9 @@ include(ExternalProject) set(JSONCPP_INCLUDE_DIR ${CONAN_INCLUDE_DIRS_JSONCPP}) include(ProjectJSONRPCCPP) +# rocksdb build +include(${PROJECT_SOURCE_DIR}/CMakeModules/rocksdb.cmake) + # Add sub-directories cmakes add_subdirectory(submodules) add_subdirectory(libraries) diff --git a/CMakeModules/rocksDB_gen.cmake b/CMakeModules/rocksdb.cmake similarity index 100% rename from CMakeModules/rocksDB_gen.cmake rename to CMakeModules/rocksdb.cmake diff --git a/libraries/core_libs/CMakeLists.txt b/libraries/core_libs/CMakeLists.txt index 
4309c6f927..925fc5b572 100644 --- a/libraries/core_libs/CMakeLists.txt +++ b/libraries/core_libs/CMakeLists.txt @@ -10,7 +10,6 @@ file(GLOB_RECURSE NETWORK_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/network/*.cpp) include(${PROJECT_SOURCE_DIR}/CMakeModules/cpp_graphql_gen.cmake) file(GLOB_RECURSE NETWORK_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/network/*.h) -include(${PROJECT_SOURCE_DIR}/CMakeModules/rocksDB_gen.cmake) file(GLOB_RECURSE STORAGE_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/storage/*.hpp) file(GLOB_RECURSE STORAGE_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/storage/*.cpp) From 7003039da84429c5a00099d903b7026f52b6dc05 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 7 Nov 2023 10:14:22 +0100 Subject: [PATCH 09/72] another try --- CMakeModules/rocksdb.cmake | 2 +- submodules/CMakeLists.txt | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CMakeModules/rocksdb.cmake b/CMakeModules/rocksdb.cmake index 67e380ece6..60a25a874e 100644 --- a/CMakeModules/rocksdb.cmake +++ b/CMakeModules/rocksdb.cmake @@ -25,7 +25,7 @@ set(WITH_TOOLS OFF CACHE INTERNAL "") set(WITH_BENCHMARK_TOOLS OFF CACHE INTERNAL "") set(WITH_CORE_TOOLS OFF CACHE INTERNAL "") set(WITH_TRACE_TOOLS OFF CACHE INTERNAL "") -set(CMAKE_ENABLE_SHARED OFF CACHE INTERNAL "") +set(CMAKE_ENABLE_SHARED ${BUILD_SHARED_LIBS} CACHE INTERNAL "") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=unused-const-variable -Wno-error=pedantic -Wno-error=format-pedantic -Wno-error=gnu-zero-variadic-macro-arguments -Wno-error=extra-semi -Wc++98-compat-extra-semi -Wno-error=deprecated-copy -Wno-deprecated-copy -Wno-error=pessimizing-move -Wno-pessimizing-move -Wno-pessimizing-move") FetchContent_Populate(rocksdb) diff --git a/submodules/CMakeLists.txt b/submodules/CMakeLists.txt index 48fae128a5..117cbcd879 100644 --- a/submodules/CMakeLists.txt +++ b/submodules/CMakeLists.txt @@ -88,7 +88,7 @@ set(EVM_BUILD_INCLUDE -I${rocksdb_SOURCE_DIR}/include) ## set C flags set(EVM_BUILD_CGO_CFLAGS -O3 ${EVM_BUILD_INCLUDE}) ## 
add link of libs -set(EVM_BUILD_LD -L${ROCKSDB_LIB} -lrocksdb -L${CONAN_LIB_DIRS_LZ4} -llz4) +set(EVM_BUILD_LD -L${CMAKE_BINARY_DIR}/lib -lrocksdb -L${CONAN_LIB_DIRS_LZ4} -llz4) ## if we need full static build use flag if(TARAXA_STATIC_BUILD) if (NOT APPLE) @@ -126,6 +126,7 @@ add_custom_command( COMMENT "Building taraxa-evm library") add_custom_target(taraxa_evm_build DEPENDS ${EVM_BUILD_DIR}/lib/${EVM_LIBRARY_NAME}) +add_dependencies(taraxa_evm_build rocksdb) add_library(taraxa-evm INTERFACE) add_dependencies(taraxa-evm taraxa_evm_build) From 94217daa167057c6e179b8a14067ac821d533153 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 7 Nov 2023 11:39:46 +0100 Subject: [PATCH 10/72] small changes --- CMakeModules/rocksdb.cmake | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/CMakeModules/rocksdb.cmake b/CMakeModules/rocksdb.cmake index 60a25a874e..00ff3b80a8 100644 --- a/CMakeModules/rocksdb.cmake +++ b/CMakeModules/rocksdb.cmake @@ -26,8 +26,6 @@ set(WITH_BENCHMARK_TOOLS OFF CACHE INTERNAL "") set(WITH_CORE_TOOLS OFF CACHE INTERNAL "") set(WITH_TRACE_TOOLS OFF CACHE INTERNAL "") set(CMAKE_ENABLE_SHARED ${BUILD_SHARED_LIBS} CACHE INTERNAL "") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=unused-const-variable -Wno-error=pedantic -Wno-error=format-pedantic -Wno-error=gnu-zero-variadic-macro-arguments -Wno-error=extra-semi -Wc++98-compat-extra-semi -Wno-error=deprecated-copy -Wno-deprecated-copy -Wno-error=pessimizing-move -Wno-pessimizing-move -Wno-pessimizing-move") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -Wno-error=unused-const-variable -Wno-error=pedantic -Wno-error=format-pedantic -Wno-error=gnu-zero-variadic-macro-arguments -Wno-error=extra-semi -Wc++98-compat-extra-semi -Wno-error=deprecated-copy -Wno-deprecated-copy -Wno-error=pessimizing-move -Wno-pessimizing-move -Wno-pessimizing-move") -FetchContent_Populate(rocksdb) -add_subdirectory(${rocksdb_SOURCE_DIR} ${rocksdb_BINARY_DIR} EXCLUDE_FROM_ALL) 
-include_directories("${rocksdb_SOURCE_DIR}/include") +FetchContent_MakeAvailable(rocksdb) From 127ba4e658e7430bba8771f9873aae15a3ec04fa Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 7 Nov 2023 14:01:57 +0100 Subject: [PATCH 11/72] more changes --- CMakeModules/rocksdb.cmake | 4 +++- Dockerfile | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CMakeModules/rocksdb.cmake b/CMakeModules/rocksdb.cmake index 00ff3b80a8..30c14102da 100644 --- a/CMakeModules/rocksdb.cmake +++ b/CMakeModules/rocksdb.cmake @@ -25,7 +25,9 @@ set(WITH_TOOLS OFF CACHE INTERNAL "") set(WITH_BENCHMARK_TOOLS OFF CACHE INTERNAL "") set(WITH_CORE_TOOLS OFF CACHE INTERNAL "") set(WITH_TRACE_TOOLS OFF CACHE INTERNAL "") -set(CMAKE_ENABLE_SHARED ${BUILD_SHARED_LIBS} CACHE INTERNAL "") +set(ROCKSDB_BUILD_SHARED ${BUILD_SHARED_LIBS} CACHE INTERNAL "") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -Wno-error=unused-const-variable -Wno-error=pedantic -Wno-error=format-pedantic -Wno-error=gnu-zero-variadic-macro-arguments -Wno-error=extra-semi -Wc++98-compat-extra-semi -Wno-error=deprecated-copy -Wno-deprecated-copy -Wno-error=pessimizing-move -Wno-pessimizing-move -Wno-pessimizing-move") FetchContent_MakeAvailable(rocksdb) + +install(DIRECTORY "${rocksdb_SOURCE_DIR}/include" DESTINATION include) diff --git a/Dockerfile b/Dockerfile index f7dcc07e97..a261b40210 100644 --- a/Dockerfile +++ b/Dockerfile @@ -43,7 +43,7 @@ WORKDIR /root/.taraxa # Copy required binaries COPY --from=build /opt/taraxa/$BUILD_OUTPUT_DIR/bin/taraxad /usr/local/bin/taraxad COPY --from=build /opt/taraxa/$BUILD_OUTPUT_DIR/bin/taraxa-bootnode /usr/local/bin/taraxa-bootnode -COPY --from=build /opt/taraxa/$BUILD_OUTPUT_DIR/lib/*.so /usr/local/lib/ +COPY --from=build /opt/taraxa/$BUILD_OUTPUT_DIR/lib/*.so* /usr/local/lib/ # Copy scripts COPY scripts/taraxa-sign.py /usr/local/bin/taraxa-sign From fc62d023b5eea954f1299b775f33e5d80a7a4495 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 7 Nov 2023 15:27:49 +0100 
Subject: [PATCH 12/72] another try --- CMakeModules/rocksdb.cmake | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/CMakeModules/rocksdb.cmake b/CMakeModules/rocksdb.cmake index 30c14102da..968df04ac4 100644 --- a/CMakeModules/rocksdb.cmake +++ b/CMakeModules/rocksdb.cmake @@ -7,27 +7,27 @@ set(CMAKE_POLICY_DEFAULT_CMP0077 NEW) FetchContent_Declare( rocksdb - GIT_REPOSITORY https://github.com/facebook/rocksdb + GIT_REPOSITORY https://github.com/facebook/rocksdb.git GIT_TAG v8.5.3 GIT_SHALLOW TRUE ) FetchContent_GetProperties(rocksdb) +if(NOT rocksdb_POPULATED) + message(STATUS "Populating rocksdb") + set(USE_RTTI 1) + set(WITH_LZ4 ON) + set(WITH_GFLAGS OFF) + set(FAIL_ON_WARNINGS OFF) + set(WITH_TESTS OFF CACHE INTERNAL "") + set(WITH_JNI OFF CACHE INTERNAL "") + set(WITH_TOOLS OFF CACHE INTERNAL "") + set(WITH_BENCHMARK_TOOLS OFF CACHE INTERNAL "") + set(WITH_CORE_TOOLS OFF CACHE INTERNAL "") + set(WITH_TRACE_TOOLS OFF CACHE INTERNAL "") + set(ROCKSDB_BUILD_SHARED ${BUILD_SHARED_LIBS} CACHE INTERNAL "") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -Wno-error=unused-const-variable -Wno-error=pedantic -Wno-error=format-pedantic -Wno-error=gnu-zero-variadic-macro-arguments -Wno-error=extra-semi -Wc++98-compat-extra-semi -Wno-error=deprecated-copy -Wno-deprecated-copy -Wno-error=pessimizing-move -Wno-pessimizing-move -Wno-pessimizing-move") -message(STATUS "Populating rocksdb") -set(USE_RTTI 1) -set(WITH_LZ4 ON) -set(WITH_GFLAGS OFF) -set(FAIL_ON_WARNINGS OFF) -set(WITH_TESTS OFF CACHE INTERNAL "") -set(WITH_JNI OFF CACHE INTERNAL "") -set(WITH_TOOLS OFF CACHE INTERNAL "") -set(WITH_BENCHMARK_TOOLS OFF CACHE INTERNAL "") -set(WITH_CORE_TOOLS OFF CACHE INTERNAL "") -set(WITH_TRACE_TOOLS OFF CACHE INTERNAL "") -set(ROCKSDB_BUILD_SHARED ${BUILD_SHARED_LIBS} CACHE INTERNAL "") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -Wno-error=unused-const-variable -Wno-error=pedantic -Wno-error=format-pedantic 
-Wno-error=gnu-zero-variadic-macro-arguments -Wno-error=extra-semi -Wc++98-compat-extra-semi -Wno-error=deprecated-copy -Wno-deprecated-copy -Wno-error=pessimizing-move -Wno-pessimizing-move -Wno-pessimizing-move") - -FetchContent_MakeAvailable(rocksdb) - -install(DIRECTORY "${rocksdb_SOURCE_DIR}/include" DESTINATION include) + add_subdirectory(${rocksdb_SOURCE_DIR} ${rocksdb_BINARY_DIR}) + FetchContent_Populate(rocksdb) +endif() \ No newline at end of file From e0f4171efb002b7f8d26e3d4d1762e006c180e19 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 7 Nov 2023 15:44:01 +0100 Subject: [PATCH 13/72] Revert "another try" This reverts commit 02c2df3255e53fdb00366a9208ffbfe0aeda0fac. --- CMakeModules/rocksdb.cmake | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/CMakeModules/rocksdb.cmake b/CMakeModules/rocksdb.cmake index 968df04ac4..30c14102da 100644 --- a/CMakeModules/rocksdb.cmake +++ b/CMakeModules/rocksdb.cmake @@ -7,27 +7,27 @@ set(CMAKE_POLICY_DEFAULT_CMP0077 NEW) FetchContent_Declare( rocksdb - GIT_REPOSITORY https://github.com/facebook/rocksdb.git + GIT_REPOSITORY https://github.com/facebook/rocksdb GIT_TAG v8.5.3 GIT_SHALLOW TRUE ) FetchContent_GetProperties(rocksdb) -if(NOT rocksdb_POPULATED) - message(STATUS "Populating rocksdb") - set(USE_RTTI 1) - set(WITH_LZ4 ON) - set(WITH_GFLAGS OFF) - set(FAIL_ON_WARNINGS OFF) - set(WITH_TESTS OFF CACHE INTERNAL "") - set(WITH_JNI OFF CACHE INTERNAL "") - set(WITH_TOOLS OFF CACHE INTERNAL "") - set(WITH_BENCHMARK_TOOLS OFF CACHE INTERNAL "") - set(WITH_CORE_TOOLS OFF CACHE INTERNAL "") - set(WITH_TRACE_TOOLS OFF CACHE INTERNAL "") - set(ROCKSDB_BUILD_SHARED ${BUILD_SHARED_LIBS} CACHE INTERNAL "") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -Wno-error=unused-const-variable -Wno-error=pedantic -Wno-error=format-pedantic -Wno-error=gnu-zero-variadic-macro-arguments -Wno-error=extra-semi -Wc++98-compat-extra-semi -Wno-error=deprecated-copy -Wno-deprecated-copy 
-Wno-error=pessimizing-move -Wno-pessimizing-move -Wno-pessimizing-move") - add_subdirectory(${rocksdb_SOURCE_DIR} ${rocksdb_BINARY_DIR}) - FetchContent_Populate(rocksdb) -endif() \ No newline at end of file +message(STATUS "Populating rocksdb") +set(USE_RTTI 1) +set(WITH_LZ4 ON) +set(WITH_GFLAGS OFF) +set(FAIL_ON_WARNINGS OFF) +set(WITH_TESTS OFF CACHE INTERNAL "") +set(WITH_JNI OFF CACHE INTERNAL "") +set(WITH_TOOLS OFF CACHE INTERNAL "") +set(WITH_BENCHMARK_TOOLS OFF CACHE INTERNAL "") +set(WITH_CORE_TOOLS OFF CACHE INTERNAL "") +set(WITH_TRACE_TOOLS OFF CACHE INTERNAL "") +set(ROCKSDB_BUILD_SHARED ${BUILD_SHARED_LIBS} CACHE INTERNAL "") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -Wno-error=unused-const-variable -Wno-error=pedantic -Wno-error=format-pedantic -Wno-error=gnu-zero-variadic-macro-arguments -Wno-error=extra-semi -Wc++98-compat-extra-semi -Wno-error=deprecated-copy -Wno-deprecated-copy -Wno-error=pessimizing-move -Wno-pessimizing-move -Wno-pessimizing-move") + +FetchContent_MakeAvailable(rocksdb) + +install(DIRECTORY "${rocksdb_SOURCE_DIR}/include" DESTINATION include) From e34d4a855e6efc9cdc66292670d9e8cdeed28652 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 7 Nov 2023 15:45:49 +0100 Subject: [PATCH 14/72] remove install --- CMakeModules/rocksdb.cmake | 2 -- 1 file changed, 2 deletions(-) diff --git a/CMakeModules/rocksdb.cmake b/CMakeModules/rocksdb.cmake index 30c14102da..fa003f63db 100644 --- a/CMakeModules/rocksdb.cmake +++ b/CMakeModules/rocksdb.cmake @@ -29,5 +29,3 @@ set(ROCKSDB_BUILD_SHARED ${BUILD_SHARED_LIBS} CACHE INTERNAL "") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -Wno-error=unused-const-variable -Wno-error=pedantic -Wno-error=format-pedantic -Wno-error=gnu-zero-variadic-macro-arguments -Wno-error=extra-semi -Wc++98-compat-extra-semi -Wno-error=deprecated-copy -Wno-deprecated-copy -Wno-error=pessimizing-move -Wno-pessimizing-move -Wno-pessimizing-move") FetchContent_MakeAvailable(rocksdb) - -install(DIRECTORY 
"${rocksdb_SOURCE_DIR}/include" DESTINATION include) From da0559844267e78e9ffc3a3352789a5b3d6fae48 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 8 Nov 2023 14:37:28 +0100 Subject: [PATCH 15/72] try something --- Dockerfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index a261b40210..ad24f2b28f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -20,9 +20,10 @@ RUN mkdir $BUILD_OUTPUT_DIR && cd $BUILD_OUTPUT_DIR \ RUN cd $BUILD_OUTPUT_DIR && make -j$(nproc) all \ # Copy CMake generated Testfile to be able to trigger ctest from bin directory - && cp tests/CTestTestfile.cmake bin/ \ + && cp tests/CTestTestfile.cmake bin/; + # \ # keep only required shared libraries and final binaries - && find . -maxdepth 1 ! -name "lib" ! -name "bin" -exec rm -rfv {} \; + # && find . -maxdepth 1 ! -name "lib" ! -name "bin" -exec rm -rfv {} \; ############################################################################### ##### Taraxa image containing taraxad binary + dynamic libraries + config ##### From 8bce8bed77c995edcdb385e72fc597b2d9d834f5 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 8 Nov 2023 16:39:32 +0100 Subject: [PATCH 16/72] point to the LD --- Dockerfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Dockerfile b/Dockerfile index ad24f2b28f..eeb063441d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -25,6 +25,9 @@ RUN cd $BUILD_OUTPUT_DIR && make -j$(nproc) all \ # keep only required shared libraries and final binaries # && find . -maxdepth 1 ! -name "lib" ! 
-name "bin" -exec rm -rfv {} \; +# Set LD_LIBRARY_PATH so taraxad binary finds shared libs +ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib + ############################################################################### ##### Taraxa image containing taraxad binary + dynamic libraries + config ##### ############################################################################### From 718a36c397f98495a1584d894043a16588ddcac6 Mon Sep 17 00:00:00 2001 From: Dmytro Kostenko Date: Wed, 22 Nov 2023 15:25:13 +0100 Subject: [PATCH 17/72] fix: go tests issue --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index eeb063441d..5f1f60d1a4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -27,6 +27,7 @@ RUN cd $BUILD_OUTPUT_DIR && make -j$(nproc) all \ # Set LD_LIBRARY_PATH so taraxad binary finds shared libs ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib +ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/taraxa/$BUILD_OUTPUT_DIR/lib/ ############################################################################### ##### Taraxa image containing taraxad binary + dynamic libraries + config ##### From 70ec8819772062cbdb8588bad30bf2be6ea4bf1e Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 22 Nov 2023 16:32:46 +0100 Subject: [PATCH 18/72] update evm --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index d654aabf76..5de4dd601e 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit d654aabf767f7cd24207c641031e5271c48dd123 +Subproject commit 5de4dd601e35f29d954fd1588827354d939ff89c From 337421c5b0a3594a1a7ee33bd95beaf054191188 Mon Sep 17 00:00:00 2001 From: Leonard Mocanu Date: Wed, 22 Nov 2023 18:07:59 +0200 Subject: [PATCH 19/72] chore: fixes builder image push --- .circleci/config.yml | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 
92a0586b60..4bb4558b0d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -484,6 +484,24 @@ commands: docker push taraxa/${IMAGE}:${RC_TAG} fi + push_builder: + description: Push images to Docker Hub (builder) + steps: + - run: + name: Login into Docker Hub + command: | + echo ${DOCKERHUB_PASS} | docker login -u taraxa --password-stdin + + - run: + name: Push Images + command: | + if [[ ${CIRCLE_TAG} != "" ]];then + TAG=$(echo ${CIRCLE_TAG} | sed 's/^builder-//g') + docker push taraxa/${IMAGE}:${TAG} + docker push taraxa/${IMAGE}:latest + else + docker push taraxa/${IMAGE}:${VERSION} + fi test: description: Run tests @@ -734,7 +752,7 @@ jobs: command: | docker history taraxa-builder:${VERSION} - tag_builder - - push_dockerhub + - push_builder release-docker-image: From 6e2333fbb1ca0ed874b8e4803ee6888bae88ec62 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 22 Nov 2023 22:25:51 +0100 Subject: [PATCH 20/72] update evm --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 5de4dd601e..c067e03759 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 5de4dd601e35f29d954fd1588827354d939ff89c +Subproject commit c067e03759ca18fbe98c75d5857cfcd840d46e6a From be3c3cfc67981168434ace20611fa576a6695809 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 23 Nov 2023 10:17:34 +0100 Subject: [PATCH 21/72] chore: testing new way --- Dockerfile | 1 - tests/CMakeLists.txt | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 5f1f60d1a4..eeb063441d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -27,7 +27,6 @@ RUN cd $BUILD_OUTPUT_DIR && make -j$(nproc) all \ # Set LD_LIBRARY_PATH so taraxad binary finds shared libs ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib -ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/taraxa/$BUILD_OUTPUT_DIR/lib/ 
############################################################################### ##### Taraxa image containing taraxad binary + dynamic libraries + config ##### diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index f9613497b3..d120dea503 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -112,6 +112,7 @@ set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS ${GO_TEST_SCRIPT} file(WRITE ${GO_TEST_SCRIPT} "#!/bin/bash cd ${CMAKE_SOURCE_DIR}/submodules/taraxa-evm/ +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${CMAKE_BINARY_DIR}/lib export ${EVM_BUILD_ENV_STRING} go test ./..." ) From 0b2521f0e381e1883a919f3b0ebb5abc9d6398d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 24 Nov 2023 16:55:23 +0000 Subject: [PATCH 22/72] chore(deps): bump eth-abi from 2.1.1 to 4.2.0 in /for_devs Bumps [eth-abi](https://github.com/ethereum/eth-abi) from 2.1.1 to 4.2.0. - [Release notes](https://github.com/ethereum/eth-abi/releases) - [Changelog](https://github.com/ethereum/eth-abi/blob/master/docs/release_notes.rst) - [Commits](https://github.com/ethereum/eth-abi/compare/v2.1.1...v4.2.0) --- updated-dependencies: - dependency-name: eth-abi dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- for_devs/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/for_devs/requirements.txt b/for_devs/requirements.txt index cf2e2523cb..1510c7f085 100644 --- a/for_devs/requirements.txt +++ b/for_devs/requirements.txt @@ -7,7 +7,7 @@ certifi==2023.7.22 chardet==4.0.0 click==8.0.1 cytoolz==0.11.0 -eth-abi==2.1.1 +eth-abi==4.2.0 eth-account==0.5.9 eth-hash==0.3.1 eth-keyfile==0.5.1 From be0b94c51a451f93ff1fbc9a40305220ef454785 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Nov 2023 00:48:14 +0000 Subject: [PATCH 23/72] chore(deps): bump aiohttp from 3.8.5 to 3.9.0 in /for_devs Bumps [aiohttp](https://github.com/aio-libs/aiohttp) from 3.8.5 to 3.9.0. - [Release notes](https://github.com/aio-libs/aiohttp/releases) - [Changelog](https://github.com/aio-libs/aiohttp/blob/master/CHANGES.rst) - [Commits](https://github.com/aio-libs/aiohttp/compare/v3.8.5...v3.9.0) --- updated-dependencies: - dependency-name: aiohttp dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- for_devs/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/for_devs/requirements.txt b/for_devs/requirements.txt index cf2e2523cb..5922771387 100644 --- a/for_devs/requirements.txt +++ b/for_devs/requirements.txt @@ -1,4 +1,4 @@ -aiohttp==3.8.5 +aiohttp==3.9.0 async-timeout==3.0.1 attrs==21.2.0 base58==2.1.0 From e07c24c607a25af44e5731e9ae1b90cdb50d3748 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Thu, 23 Nov 2023 14:13:30 +0100 Subject: [PATCH 24/72] fix: pbft syncing gossip transition --- .../cli/config_jsons/default/default_config.json | 2 +- .../cli/config_jsons/devnet/devnet_config.json | 2 +- .../cli/config_jsons/mainnet/mainnet_config.json | 2 +- .../cli/config_jsons/testnet/testnet_config.json | 2 +- .../core_libs/consensus/src/pbft/pbft_manager.cpp | 2 +- .../latest/pbft_sync_packet_handler.hpp | 2 ++ .../latest/pbft_sync_packet_handler.cpp | 12 +++++++++--- 7 files changed, 16 insertions(+), 8 deletions(-) diff --git a/libraries/cli/include/cli/config_jsons/default/default_config.json b/libraries/cli/include/cli/config_jsons/default/default_config.json index 8719620223..35c31d9ef6 100644 --- a/libraries/cli/include/cli/config_jsons/default/default_config.json +++ b/libraries/cli/include/cli/config_jsons/default/default_config.json @@ -34,7 +34,7 @@ "sync_level_size": 25, "packets_processing_threads": 14, "peer_blacklist_timeout": 600, - "deep_syncing_threshold": 10, + "deep_syncing_threshold": 50, "boot_nodes": [ ] }, diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json index ad76cfe233..9afbc21dc1 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json @@ -34,7 +34,7 @@ "sync_level_size": 25, "packets_processing_threads": 14, "peer_blacklist_timeout": 600, - "deep_syncing_threshold": 10, + 
"deep_syncing_threshold": 50, "boot_nodes": [ { "id": "fdcf4c860d9bb1f17608cbf2dd10ac3ae8d0ba41aa20b3e43fb85a72617a356f8609475d68b44e25dd508a0e5b36da75e7ae9aaf93360f4f002464d1d75fd353", diff --git a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_config.json b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_config.json index b1dcc8d7a0..66cb1b4499 100644 --- a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_config.json +++ b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_config.json @@ -34,7 +34,7 @@ "sync_level_size": 10, "packets_processing_threads": 14, "peer_blacklist_timeout": 600, - "deep_syncing_threshold": 10, + "deep_syncing_threshold": 50, "boot_nodes": [ { "id": "45949587c9f31f62e802175471c142da1618f4e456a77e51b0fcb3cc14b14bba7f585d44db15417fff96c29967a4778c60584ced00148bc905609a14fa7e538f", diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json index f414d9f0a5..985b388255 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json @@ -34,7 +34,7 @@ "sync_level_size": 10, "packets_processing_threads": 14, "peer_blacklist_timeout": 600, - "deep_syncing_threshold": 10, + "deep_syncing_threshold": 50, "boot_nodes": [ { "id": "f36f467529fe91a750dfdc8086fd0d2f30bad9f55a5800b6b4aa603c7787501db78dc4ac1bf3cf16e42af7c2ebb53648653013c3da1987494960d751871d598a", diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index 59c04938ea..62204eb196 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -1444,7 +1444,7 @@ void PbftManager::pushSyncedPbftBlocksIntoChain() { if (pushPbftBlock_(std::move(period_data.first), std::move(period_data.second))) { LOG(log_dg_) << "Pushed synced PBFT block " << 
pbft_block_hash << " with period " << pbft_block_period; - net->setSyncStatePeriod(pbft_block_period); + net->setSyncStatePeriod(pbftSyncingPeriod()); } else { LOG(log_er_) << "Failed push PBFT block " << pbft_block_hash << " with period " << pbft_block_period; break; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp index 1f37e31eaf..1eba2f3f4d 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp @@ -30,6 +30,8 @@ class PbftSyncPacketHandler : public ExtSyncingPacketHandler { void pbftSyncComplete(); void delayedPbftSync(int counter); + static constexpr uint32_t kDelayedPbftSyncDelayMs = 10; + std::shared_ptr vote_mgr_; util::ThreadPool periodic_events_tp_; diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp index 0c2a9e9838..e90ead3577 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp @@ -95,6 +95,12 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, << packet_data.from_node_id_ << " already present in chain"; } else { if (pbft_block_period != pbft_mgr_->pbftSyncingPeriod() + 1) { + // This can happen if we just got synced and block was cert voted + if (pbft_chain_synced && pbft_block_period == pbft_mgr_->pbftSyncingPeriod()) { + pbftSyncComplete(); + return; + } + LOG(log_er_) << "Block " << pbft_blk_hash << " period unexpected: " << pbft_block_period << ". 
Expected period: " << pbft_mgr_->pbftSyncingPeriod() + 1; return; @@ -207,7 +213,7 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, if (pbft_sync_period > pbft_chain_->getPbftChainSize() + (10 * kConf.network.sync_level_size)) { LOG(log_tr_) << "Syncing pbft blocks too fast than processing. Has synced period " << pbft_sync_period << ", PBFT chain size " << pbft_chain_->getPbftChainSize(); - periodic_events_tp_.post(1000, [this] { delayedPbftSync(1); }); + periodic_events_tp_.post(kDelayedPbftSyncDelayMs, [this] { delayedPbftSync(1); }); } else { if (!syncPeerPbft(pbft_sync_period + 1)) { pbft_syncing_state_->setPbftSyncing(false); @@ -230,7 +236,7 @@ void PbftSyncPacketHandler::pbftSyncComplete() { if (pbft_mgr_->periodDataQueueSize()) { LOG(log_tr_) << "Syncing pbft blocks faster than processing. Remaining sync size " << pbft_mgr_->periodDataQueueSize(); - periodic_events_tp_.post(1000, [this] { pbftSyncComplete(); }); + periodic_events_tp_.post(kDelayedPbftSyncDelayMs, [this] { pbftSyncComplete(); }); } else { LOG(log_dg_) << "Syncing PBFT is completed"; // We are pbft synced with the node we are connected to but @@ -259,7 +265,7 @@ void PbftSyncPacketHandler::delayedPbftSync(int counter) { if (pbft_sync_period > pbft_chain_->getPbftChainSize() + (10 * kConf.network.sync_level_size)) { LOG(log_tr_) << "Syncing pbft blocks faster than processing " << pbft_sync_period << " " << pbft_chain_->getPbftChainSize(); - periodic_events_tp_.post(1000, [this, counter] { delayedPbftSync(counter + 1); }); + periodic_events_tp_.post(kDelayedPbftSyncDelayMs, [this, counter] { delayedPbftSync(counter + 1); }); } else { if (!syncPeerPbft(pbft_sync_period + 1)) { pbft_syncing_state_->setPbftSyncing(false); From 6b19aafd880f3ad922dc23edb2ca654addb474c9 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Thu, 9 Nov 2023 15:15:39 +0100 Subject: [PATCH 25/72] chore: improve syncing speed --- .../common/include/common/vrf_wrapper.hpp | 5 +- 
libraries/common/src/vrf_wrapper.cpp | 22 ++-- .../include/final_chain/final_chain.hpp | 5 + .../consensus/include/pbft/pbft_manager.hpp | 19 ++-- .../include/vote_manager/vote_manager.hpp | 3 +- .../consensus/src/final_chain/final_chain.cpp | 22 ++-- .../consensus/src/pbft/pbft_manager.cpp | 51 +++------ .../consensus/src/pbft/period_data_queue.cpp | 4 +- .../src/vote_manager/vote_manager.cpp | 4 +- .../network/include/network/ws_server.hpp | 1 + .../latest/get_pbft_sync_packet_handler.cpp | 3 +- .../v1/get_pbft_sync_packet_handler.cpp | 3 +- libraries/core_libs/network/src/ws_server.cpp | 5 + libraries/core_libs/node/src/node.cpp | 100 +++++++++++------- libraries/types/vote/include/vote/vote.hpp | 2 +- .../types/vote/include/vote/vrf_sortition.hpp | 6 +- tests/full_node_test.cpp | 2 +- 17 files changed, 138 insertions(+), 119 deletions(-) diff --git a/libraries/common/include/common/vrf_wrapper.hpp b/libraries/common/include/common/vrf_wrapper.hpp index f1107f7417..d233b6de70 100644 --- a/libraries/common/include/common/vrf_wrapper.hpp +++ b/libraries/common/include/common/vrf_wrapper.hpp @@ -22,7 +22,8 @@ bool isValidVrfPublicKey(vrf_pk_t const &pk); // get proof if public is valid std::optional getVrfProof(vrf_sk_t const &pk, bytes const &msg); // get output if proff is valid -std::optional getVrfOutput(vrf_pk_t const &pk, vrf_proof_t const &proof, bytes const &msg); +std::optional getVrfOutput(vrf_pk_t const &pk, vrf_proof_t const &proof, bytes const &msg, + bool strict = true); class VrfSortitionBase { public: @@ -38,7 +39,7 @@ class VrfSortitionBase { static dev::bytes makeVrfInput(taraxa::level_t level, const dev::h256 &period_hash); - bool verify(const vrf_pk_t &pk, const bytes &msg, uint16_t vote_count = 1) const; + bool verify(const vrf_pk_t &pk, const bytes &msg, uint16_t vote_count = 1, bool strict = true) const; bool operator==(VrfSortitionBase const &other) const { return proof_ == other.proof_ && output_ == other.output_; } diff --git 
a/libraries/common/src/vrf_wrapper.cpp b/libraries/common/src/vrf_wrapper.cpp index fcf01f4103..6091ad3408 100644 --- a/libraries/common/src/vrf_wrapper.cpp +++ b/libraries/common/src/vrf_wrapper.cpp @@ -29,12 +29,17 @@ std::optional getVrfProof(vrf_sk_t const &sk, bytes const &msg) { return {}; } -std::optional getVrfOutput(vrf_pk_t const &pk, vrf_proof_t const &proof, bytes const &msg) { +std::optional getVrfOutput(vrf_pk_t const &pk, vrf_proof_t const &proof, bytes const &msg, bool strict) { vrf_output_t output; - // crypto_vrf_verify return 0 on success! - if (!crypto_vrf_verify(output.data(), const_cast(pk.data()), - const_cast(proof.data()), const_cast(msg.data()), - msg.size())) { + if (strict) { + // crypto_vrf_verify return 0 on success! + if (!crypto_vrf_verify(output.data(), const_cast(pk.data()), + const_cast(proof.data()), const_cast(msg.data()), + msg.size())) { + return output; + } + } else { + crypto_vrf_proof_to_hash(output.data(), const_cast(proof.data())); return output; } return {}; @@ -47,11 +52,8 @@ dev::bytes VrfSortitionBase::makeVrfInput(taraxa::level_t level, const dev::h256 return s.invalidate(); } -bool VrfSortitionBase::verify(const vrf_pk_t &pk, const bytes &msg, uint16_t vote_count) const { - if (!isValidVrfPublicKey(pk)) { - return false; - } - auto res = vrf_wrapper::getVrfOutput(pk, proof_, msg); +bool VrfSortitionBase::verify(const vrf_pk_t &pk, const bytes &msg, uint16_t vote_count, bool strict) const { + auto res = vrf_wrapper::getVrfOutput(pk, proof_, msg, strict); if (res != std::nullopt) { output_ = res.value(); thresholdFromOutput(vote_count); diff --git a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp index 3101cdd4f6..70e95043cd 100644 --- a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp +++ b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp @@ -223,6 +223,11 @@ class FinalChain { */ virtual void 
prune(EthBlockNumber blk_n) = 0; + /** + * @brief Wait until next block is finalized + */ + virtual void wait_for_finalized() = 0; + virtual std::vector dpos_validators_total_stakes(EthBlockNumber blk_num) const = 0; // TODO move out of here: diff --git a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp index 331c0f5816..4ba7185add 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp @@ -194,13 +194,6 @@ class PbftManager { */ blk_hash_t lastPbftBlockHashFromQueueOrChain(); - /** - * @brief Add rebuild DB with provided data - * @param block period data - * @param current_block_cert_votes cert votes for PeriodData pbft block period - */ - void addRebuildDBPeriodData(PeriodData &&period_data, std::vector> &¤t_block_cert_votes); - /** * @brief Get PBFT lambda. PBFT lambda is a timer clock * @return PBFT lambda @@ -266,12 +259,6 @@ class PbftManager { */ void testBroadcatVotesFunctionality(); - private: - /** - * @brief Broadcast or rebroadcast 2t+1 soft/reward/previous round next votes + all own votes if needed - */ - void broadcastVotes(); - /** * @brief Check PBFT blocks syncing queue. 
If there are synced PBFT blocks in queue, push it to PBFT chain */ @@ -283,6 +270,12 @@ class PbftManager { */ void waitForPeriodFinalization(); + private: + /** + * @brief Broadcast or rebroadcast 2t+1 soft/reward/previous round next votes + all own votes if needed + */ + void broadcastVotes(); + /** * @brief Reset PBFT step to 1 */ diff --git a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp index 6562acfc2e..de8d6391b7 100644 --- a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp +++ b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp @@ -180,9 +180,10 @@ class VoteManager { * @brief Validates vote * * @param vote to be validated + * @param strict strict validation * @return vote validation passed, otherwise */ - std::pair validateVote(const std::shared_ptr& vote) const; + std::pair validateVote(const std::shared_ptr& vote, bool strict = true) const; /** * @brief Get 2t+1. 
2t+1 is 2/3 of PBFT sortition threshold and plus 1 for a specific period diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index 80465556b7..c39ef5a6fb 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -18,7 +18,6 @@ class FinalChainImpl final : public FinalChain { const uint64_t kBlockGasLimit; StateAPI state_api_; const bool kLightNode = false; - const uint64_t kLightNodeHistory = 0; const uint32_t kMaxLevelsPerPeriod; rewards::Stats rewards_; @@ -28,11 +27,6 @@ class FinalChainImpl final : public FinalChain { std::atomic num_executed_dag_blk_ = 0; std::atomic num_executed_trx_ = 0; - rocksdb::WriteOptions const db_opts_w_ = [] { - rocksdb::WriteOptions ret; - ret.sync = true; - return ret; - }(); EthBlockNumber delegation_delay_; ValueByBlockCache> block_headers_cache_; @@ -45,6 +39,9 @@ class FinalChainImpl final : public FinalChain { MapByBlockCache dpos_vote_count_cache_; MapByBlockCache dpos_is_eligible_cache_; + std::condition_variable finalized_cv_; + std::mutex finalized_mtx_; + LOG_OBJECTS_DEFINE public: @@ -57,7 +54,6 @@ class FinalChainImpl final : public FinalChain { db->stateDbStoragePath().string(), }), kLightNode(config.is_light_node), - kLightNodeHistory(config.light_node_history), kMaxLevelsPerPeriod(config.max_levels_per_period), rewards_(config.genesis.pbft.committee_size, config.genesis.state.hardforks, db_, [this](EthBlockNumber n) { return dpos_eligible_total_vote_count(n); }), @@ -89,7 +85,7 @@ class FinalChainImpl final : public FinalChain { state_db_descriptor.state_root, u256(0)); block_headers_cache_.append(header->number, header); - db_->commitWriteBatch(batch, db_opts_w_); + db_->commitWriteBatch(batch); } else { // We need to recover latest changes as there was shutdown inside finalize function if (*last_blk_num != state_db_descriptor.blk_num) 
[[unlikely]] { @@ -107,7 +103,7 @@ class FinalChainImpl final : public FinalChain { db_->insert(batch, DB::Columns::status, StatusDbField::ExecutedBlkCount, num_executed_dag_blk_.load()); db_->insert(batch, DB::Columns::status, StatusDbField::ExecutedTrxCount, num_executed_trx_.load()); db_->insert(batch, DB::Columns::final_chain_meta, DBMetaKeys::LAST_NUMBER, state_db_descriptor.blk_num); - db_->commitWriteBatch(batch, db_opts_w_); + db_->commitWriteBatch(batch); last_blk_num = state_db_descriptor.blk_num; } @@ -140,6 +136,7 @@ class FinalChainImpl final : public FinalChain { finalized_dag_blk_hashes = std::move(finalized_dag_blk_hashes), anchor_block = std::move(anchor), p]() mutable { p->set_value(finalize_(std::move(new_blk), std::move(finalized_dag_blk_hashes), std::move(anchor_block))); + finalized_cv_.notify_one(); }); return p->get_future(); } @@ -231,7 +228,7 @@ class FinalChainImpl final : public FinalChain { std::move(receipts), }); - db_->commitWriteBatch(batch, db_opts_w_); + db_->commitWriteBatch(batch); state_api_.transition_state_commit(); num_executed_dag_blk_ = num_executed_dag_blk; @@ -451,6 +448,11 @@ class FinalChainImpl final : public FinalChain { return state_api_.dpos_validators_total_stakes(blk_num); } + void wait_for_finalized() override { + std::unique_lock lck(finalized_mtx_); + finalized_cv_.wait_for(lck, std::chrono::milliseconds(10)); + } + private: std::shared_ptr get_transaction_hashes(std::optional n = {}) const { const auto& trxs = db_->getPeriodTransactions(last_if_absent(n)); diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index 62204eb196..b8e1682f21 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -1656,7 +1656,7 @@ std::optional>>> PbftMan break; } // If syncing and pbft manager is faster than execution a delay might be needed to allow EVM to catch up - 
thisThreadSleepForMilliSeconds(10); + final_chain_->wait_for_finalized(); if (!retry_logged) { LOG(log_wr_) << "PBFT block " << pbft_block_hash << " validation delayed, state root missing, execution is behind"; @@ -1690,41 +1690,18 @@ std::optional>>> PbftMan return std::nullopt; } - // Get all the ordered unique non-finalized transactions which should match period_data.transactions - std::unordered_set trx_set; - std::vector transactions_to_query; - for (auto const &dag_block : period_data.dag_blocks) { - for (auto const &trx_hash : dag_block.getTrxs()) { - if (trx_set.insert(trx_hash).second) { - transactions_to_query.emplace_back(trx_hash); - } - } - } - auto non_finalized_transactions = trx_mgr_->excludeFinalizedTransactions(transactions_to_query); - - if (non_finalized_transactions.size() != period_data.transactions.size()) { - LOG(log_er_) << "Synced PBFT block " << pbft_block_hash << " transactions count " << period_data.transactions.size() - << " incorrect, expected: " << non_finalized_transactions.size(); - sync_queue_.clear(); - net->handleMaliciousSyncPeer(node_id); - return std::nullopt; - } - for (uint32_t i = 0; i < period_data.transactions.size(); i++) { - if (!non_finalized_transactions.contains(period_data.transactions[i]->getHash())) { - LOG(log_er_) << "Synced PBFT block " << pbft_block_hash << " has incorrect transaction " - << period_data.transactions[i]->getHash(); - sync_queue_.clear(); - net->handleMaliciousSyncPeer(node_id); - return std::nullopt; - } - } - return std::optional>>>( {std::move(period_data), std::move(cert_votes)}); } bool PbftManager::validatePbftBlockCertVotes(const std::shared_ptr pbft_block, const std::vector> &cert_votes) const { + // To speed up syncing/rebuilding full strict vote verification is done for all votes on every + // full_vote_validation_interval and for a random vote for each block + const uint32_t full_vote_validation_interval = 100; + const uint32_t vote_to_validate = std::rand() % cert_votes.size(); + 
const bool strict_validation = (pbft_block->getPeriod() % full_vote_validation_interval == 0); + if (cert_votes.empty()) { LOG(log_er_) << "No cert votes provided! The synced PBFT block comes from a malicious player"; return false; @@ -1739,7 +1716,8 @@ bool PbftManager::validatePbftBlockCertVotes(const std::shared_ptr pb return false; } - for (const auto &v : cert_votes) { + for (uint32_t vote_counter = 0; vote_counter < cert_votes.size(); vote_counter++) { + const auto &v = cert_votes[vote_counter]; // Any info is wrong that can determine the synced PBFT block comes from a malicious player if (v->getPeriod() != first_vote_period) { LOG(log_er_) << "Invalid cert vote " << v->getHash() << " period " << v->getPeriod() << ", PBFT block " @@ -1771,7 +1749,9 @@ bool PbftManager::validatePbftBlockCertVotes(const std::shared_ptr pb return false; } - if (const auto ret = vote_mgr_->validateVote(v); !ret.first) { + bool strict = strict_validation || (vote_counter == vote_to_validate); + + if (const auto ret = vote_mgr_->validateVote(v, strict); !ret.first) { LOG(log_er_) << "Cert vote " << v->getHash() << " validation failed. 
Err: " << ret.second << ", pbft block " << pbft_block->getBlockHash(); return false; @@ -1853,11 +1833,4 @@ std::shared_ptr PbftManager::getPbftProposedBlock(PbftPeriod period, return proposed_block->first; } -void PbftManager::addRebuildDBPeriodData(PeriodData &&period_data, - std::vector> &¤t_block_cert_votes) { - periodDataQueuePush(std::move(period_data), dev::p2p::NodeID(), std::move(current_block_cert_votes)); - pushSyncedPbftBlocksIntoChain(); - waitForPeriodFinalization(); -} - } // namespace taraxa diff --git a/libraries/core_libs/consensus/src/pbft/period_data_queue.cpp b/libraries/core_libs/consensus/src/pbft/period_data_queue.cpp index c1f8003434..9f4ac5d389 100644 --- a/libraries/core_libs/consensus/src/pbft/period_data_queue.cpp +++ b/libraries/core_libs/consensus/src/pbft/period_data_queue.cpp @@ -38,7 +38,9 @@ bool PeriodDataQueue::push(PeriodData &&period_data, const dev::p2p::NodeID &nod const auto period = period_data.pbft_blk->getPeriod(); std::unique_lock lock(queue_access_); - if (period != std::max(period_, max_pbft_size) + 1) { + // It needs to be block after the last block in the queue or max_pbft_size + 1 or max_pbft_size + 2 since it is + // possible that block max_pbft_size + 1 is removed from the queue but not yet pushed in the chain + if (period != std::max(period_, max_pbft_size) + 1 && (queue_.empty() && period != max_pbft_size + 2)) { return false; } if (max_pbft_size > period_ && !queue_.empty()) queue_.clear(); diff --git a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp index 3a614c7d12..c3c991c625 100644 --- a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp +++ b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp @@ -872,7 +872,7 @@ std::shared_ptr VoteManager::generateVote(const blk_hash_t& blockhash, Pbf return std::make_shared(kNodeSk, std::move(vrf_sortition), blockhash); } -std::pair VoteManager::validateVote(const 
std::shared_ptr& vote) const { +std::pair VoteManager::validateVote(const std::shared_ptr& vote, bool strict) const { std::stringstream err_msg; const uint64_t vote_period = vote->getPeriod(); @@ -901,7 +901,7 @@ std::pair VoteManager::validateVote(const std::shared_ptrverifyVrfSortition(*pk)) { + if (!vote->verifyVrfSortition(*pk, strict)) { err_msg << "Invalid vote " << vote->getHash() << ": invalid vrf proof"; return {false, err_msg.str()}; } diff --git a/libraries/core_libs/network/include/network/ws_server.hpp b/libraries/core_libs/network/include/network/ws_server.hpp index 0fcd33dbb9..c862b69bde 100644 --- a/libraries/core_libs/network/include/network/ws_server.hpp +++ b/libraries/core_libs/network/include/network/ws_server.hpp @@ -95,6 +95,7 @@ class WsServer : public std::enable_shared_from_this, public jsonrpc:: void newDagBlockFinalized(const blk_hash_t& blk, uint64_t period); void newPbftBlockExecuted(const PbftBlock& sche_blk, const std::vector& finalized_dag_blk_hashes); void newPendingTransaction(const trx_hash_t& trx_hash); + uint32_t numberOfSessions(); virtual std::shared_ptr createSession(tcp::socket&& socket) = 0; diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp index f71ddf33aa..f5c76f6de0 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp @@ -76,8 +76,9 @@ void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr auto data = db_->getPeriodDataRaw(block_period); if (data.size() == 0) { + // This can happen when switching from light node to full node setting LOG(log_er_) << "DB corrupted. 
Cannot find period " << block_period << " PBFT block in db"; - assert(false); + return; } dev::RLPStream s; diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v1/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v1/get_pbft_sync_packet_handler.cpp index 3d46f3dd52..ba31e44e43 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v1/get_pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v1/get_pbft_sync_packet_handler.cpp @@ -42,8 +42,9 @@ void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr auto data = db_->getPeriodDataRaw(block_period); if (data.size() == 0) { + // This can happen when switching from light node to full node setting LOG(log_er_) << "DB corrupted. Cannot find period " << block_period << " PBFT block in db"; - assert(false); + return; } dev::RLPStream s; diff --git a/libraries/core_libs/network/src/ws_server.cpp b/libraries/core_libs/network/src/ws_server.cpp index 03f0bc7da5..c46026d42c 100644 --- a/libraries/core_libs/network/src/ws_server.cpp +++ b/libraries/core_libs/network/src/ws_server.cpp @@ -333,4 +333,9 @@ void WsServer::newPendingTransaction(trx_hash_t const &trx_hash) { } } +uint32_t WsServer::numberOfSessions() { + boost::shared_lock lock(sessions_mtx_); + return sessions.size(); +} + } // namespace taraxa::net \ No newline at end of file diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index a7c5ae8933..a95d68b421 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -239,23 +239,27 @@ void FullNode::start() { jsonrpc_api_->addConnector(jsonrpc_ws_); jsonrpc_ws_->run(); } - final_chain_->block_finalized_.subscribe( - [eth_json_rpc = as_weak(eth_json_rpc), ws = as_weak(jsonrpc_ws_), db = as_weak(db_)](auto const &res) { - if (auto _eth_json_rpc = eth_json_rpc.lock()) { - 
_eth_json_rpc->note_block_executed(*res->final_chain_blk, res->trxs, res->trx_receipts); - } - if (auto _ws = ws.lock()) { - _ws->newEthBlock(*res->final_chain_blk, hashes_from_transactions(res->trxs)); - if (auto _db = db.lock()) { - auto pbft_blk = _db->getPbftBlock(res->hash); - if (const auto &hash = pbft_blk->getPivotDagBlockHash(); hash != kNullBlockHash) { - _ws->newDagBlockFinalized(hash, pbft_blk->getPeriod()); + if (!conf_.db_config.rebuild_db) { + final_chain_->block_finalized_.subscribe( + [eth_json_rpc = as_weak(eth_json_rpc), ws = as_weak(jsonrpc_ws_), db = as_weak(db_)](auto const &res) { + if (auto _eth_json_rpc = eth_json_rpc.lock()) { + _eth_json_rpc->note_block_executed(*res->final_chain_blk, res->trxs, res->trx_receipts); + } + if (auto _ws = ws.lock()) { + if (_ws->numberOfSessions()) { + _ws->newEthBlock(*res->final_chain_blk, hashes_from_transactions(res->trxs)); + if (auto _db = db.lock()) { + auto pbft_blk = _db->getPbftBlock(res->hash); + if (const auto &hash = pbft_blk->getPivotDagBlockHash(); hash != kNullBlockHash) { + _ws->newDagBlockFinalized(hash, pbft_blk->getPeriod()); + } + _ws->newPbftBlockExecuted(*pbft_blk, res->dag_blk_hashes); + } } - _ws->newPbftBlockExecuted(*pbft_blk, res->dag_blk_hashes); } - } - }, - *rpc_thread_pool_); + }, + *rpc_thread_pool_); + } trx_mgr_->transaction_accepted_.subscribe( [eth_json_rpc = as_weak(eth_json_rpc), ws = as_weak(jsonrpc_ws_)](auto const &trx_hash) { @@ -296,22 +300,24 @@ void FullNode::start() { } } - // GasPricer updater - final_chain_->block_finalized_.subscribe( - [gas_pricer = as_weak(gas_pricer_)](auto const &res) { - if (auto gp = gas_pricer.lock()) { - gp->update(res->trxs); - } - }, - subscription_pool_); - - final_chain_->block_finalized_.subscribe( - [trx_manager = as_weak(trx_mgr_)](auto const &res) { - if (auto trx_mgr = trx_manager.lock()) { - trx_mgr->blockFinalized(res->final_chain_blk->number); - } - }, - subscription_pool_); + if (!conf_.db_config.rebuild_db) { + // 
GasPricer updater + final_chain_->block_finalized_.subscribe( + [gas_pricer = as_weak(gas_pricer_)](auto const &res) { + if (auto gp = gas_pricer.lock()) { + gp->update(res->trxs); + } + }, + subscription_pool_); + + final_chain_->block_finalized_.subscribe( + [trx_manager = as_weak(trx_mgr_)](auto const &res) { + if (auto trx_mgr = trx_manager.lock()) { + trx_mgr->blockFinalized(res->final_chain_blk->number); + } + }, + subscription_pool_); + } vote_mgr_->setNetwork(network_); pbft_mgr_->setNetwork(network_); @@ -362,6 +368,16 @@ void FullNode::rebuildDb() { // Read pbft blocks one by one PbftPeriod period = 1; std::shared_ptr period_data, next_period_data; + std::atomic_bool stop_async = false; + + std::future fut = std::async(std::launch::async, [this, &stop_async]() { + while (!stop_async) { + // While rebuilding pushSyncedPbftBlocksIntoChain will stay in its own internal loop + pbft_mgr_->pushSyncedPbftBlocksIntoChain(); + thisThreadSleepForMilliSeconds(1); + } + }); + while (true) { std::vector> cert_votes; if (next_period_data != nullptr) { @@ -381,24 +397,36 @@ void FullNode::rebuildDb() { } } else { next_period_data = std::make_shared(std::move(data)); + // More efficient to get sender(which is expensive) on this thread which is not as busy as the thread that pushes + // blocks to chain + for (auto &t : next_period_data->transactions) t->getSender(); cert_votes = next_period_data->previous_block_cert_votes; } LOG(log_nf_) << "Adding PBFT block " << period_data->pbft_blk->getBlockHash().toString() << " from old DB into syncing queue for processing, final chain size: " << final_chain_->last_block_number(); - pbft_mgr_->addRebuildDBPeriodData(std::move(*period_data), std::move(cert_votes)); + + pbft_mgr_->periodDataQueuePush(std::move(*period_data), dev::p2p::NodeID(), std::move(cert_votes)); + pbft_mgr_->waitForPeriodFinalization(); period++; + if (period % 100 == 0) { + while (period - pbft_chain_->getPbftChainSize() > 100) { + 
thisThreadSleepForMilliSeconds(1); + } + } if (period - 1 == conf_.db_config.rebuild_db_period) { break; } - while (final_chain_->last_block_number() != period - 1) { - thisThreadSleepForMilliSeconds(5); - LOG(log_nf_) << "Waiting on PBFT blocks to be processed. PBFT chain size " << pbft_mgr_->pbftSyncingPeriod() - << ", final chain size: " << final_chain_->last_block_number(); + + if (period % 10000 == 0) { + LOG(log_si_) << "Rebuilding period: " << period; } } + stop_async = true; + fut.wait(); + LOG(log_si_) << "Rebuild completed"; } uint64_t FullNode::getProposedBlocksCount() const { return dag_block_proposer_->getProposedBlocksCount(); } diff --git a/libraries/types/vote/include/vote/vote.hpp b/libraries/types/vote/include/vote/vote.hpp index 982586dd4d..4e6367d342 100644 --- a/libraries/types/vote/include/vote/vote.hpp +++ b/libraries/types/vote/include/vote/vote.hpp @@ -56,7 +56,7 @@ class Vote { * @brief Verify VRF sortition * @return true if passed */ - bool verifyVrfSortition(const vrf_pk_t& pk) const { return vrf_sortition_.verify(pk); } + bool verifyVrfSortition(const vrf_pk_t& pk, bool strict) const { return vrf_sortition_.verify(pk, strict); } /** * @brief Get VRF sortition diff --git a/libraries/types/vote/include/vote/vrf_sortition.hpp b/libraries/types/vote/include/vote/vrf_sortition.hpp index 20ae04a00c..4019308cf1 100644 --- a/libraries/types/vote/include/vote/vrf_sortition.hpp +++ b/libraries/types/vote/include/vote/vrf_sortition.hpp @@ -94,9 +94,13 @@ class VrfPbftSortition : public vrf_wrapper::VrfSortitionBase { /** * @brief Verify VRF sortition + * + * @param strict strict validation * @return true if passed */ - bool verify(const vrf_pk_t& pk) const { return VrfSortitionBase::verify(pk, pbft_msg_.getRlpBytes()); } + bool verify(const vrf_pk_t& pk, bool strict = true) const { + return VrfSortitionBase::verify(pk, pbft_msg_.getRlpBytes(), 1, strict); + } bool operator==(VrfPbftSortition const& other) const { return pbft_msg_ == 
other.pbft_msg_ && vrf_wrapper::VrfSortitionBase::operator==(other); diff --git a/tests/full_node_test.cpp b/tests/full_node_test.cpp index 18f75d4f00..711602cd97 100644 --- a/tests/full_node_test.cpp +++ b/tests/full_node_test.cpp @@ -1112,7 +1112,7 @@ TEST_F(FullNodeTest, receive_send_transaction) { } TEST_F(FullNodeTest, detect_overlap_transactions) { - auto node_cfgs = make_node_cfgs(5, 1, 20); + auto node_cfgs = make_node_cfgs(5, 1, 10); auto nodes = launch_nodes(node_cfgs); const auto expected_balances = effective_initial_balances(node_cfgs[0].genesis.state); const auto node_1_genesis_bal = own_effective_genesis_bal(node_cfgs[0]); From 5d3c5866d5ae595c20734d2fe6d83611b341d837 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 1 Dec 2023 10:05:56 +0100 Subject: [PATCH 26/72] fix building --- CMakeModules/rocksdb.cmake | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeModules/rocksdb.cmake b/CMakeModules/rocksdb.cmake index fa003f63db..86a9c12b9a 100644 --- a/CMakeModules/rocksdb.cmake +++ b/CMakeModules/rocksdb.cmake @@ -19,6 +19,7 @@ set(USE_RTTI 1) set(WITH_LZ4 ON) set(WITH_GFLAGS OFF) set(FAIL_ON_WARNINGS OFF) +set(PORTABLE 1 CACHE STRING "Override: Minimum CPU arch to support") # Disable -march=native set(WITH_TESTS OFF CACHE INTERNAL "") set(WITH_JNI OFF CACHE INTERNAL "") set(WITH_TOOLS OFF CACHE INTERNAL "") From 20e7e85be0f2c1a72c14250897f6a4de2fe9f9b5 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Mon, 4 Dec 2023 13:15:12 +0100 Subject: [PATCH 27/72] fix: setting dag synced state --- .../network/src/tarcap/shared_states/pbft_syncing_state.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/libraries/core_libs/network/src/tarcap/shared_states/pbft_syncing_state.cpp b/libraries/core_libs/network/src/tarcap/shared_states/pbft_syncing_state.cpp index 85410c233d..6aab89c785 100644 --- a/libraries/core_libs/network/src/tarcap/shared_states/pbft_syncing_state.cpp +++ 
b/libraries/core_libs/network/src/tarcap/shared_states/pbft_syncing_state.cpp @@ -36,7 +36,9 @@ bool PbftSyncingState::setPbftSyncing(bool syncing, PbftPeriod current_period, if (syncing) { // If pbft syncing, set dag synced state to false - peer_->peer_dag_synced_ = false; + if (peer_->dagSyncingAllowed()) { + peer_->peer_dag_synced_ = false; + } deep_pbft_syncing_ = (peer_->pbft_chain_size_ - current_period >= kDeepSyncingThreshold); // Reset last sync packet time when syncing is restarted/fresh syncing flag is set setLastSyncPacketTime(); From 122def60870e16904f4105d1bfe2c6279a7dd6e3 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Wed, 6 Dec 2023 13:56:34 +0100 Subject: [PATCH 28/72] fix: queue stuck timeout --- .../packets_handlers/latest/pbft_sync_packet_handler.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp index e90ead3577..6affd871e4 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp @@ -252,8 +252,9 @@ void PbftSyncPacketHandler::pbftSyncComplete() { } void PbftSyncPacketHandler::delayedPbftSync(int counter) { + const uint32_t max_delayed_pbft_sync_count = 60000 / kDelayedPbftSyncDelayMs; auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); - if (counter > 60) { + if (counter > max_delayed_pbft_sync_count) { LOG(log_er_) << "Pbft blocks stuck in queue, no new block processed in 60 seconds " << pbft_sync_period << " " << pbft_chain_->getPbftChainSize(); pbft_syncing_state_->setPbftSyncing(false); From 2adca3652837034e40c247dd131e0cfc250c96be Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Wed, 13 Dec 2023 15:48:18 +0100 Subject: [PATCH 29/72] fix: pbft sync rewards period mismatch --- 
.../latest/get_pbft_sync_packet_handler.cpp | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp index f5c76f6de0..d2c0adbab4 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp @@ -83,14 +83,20 @@ void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr dev::RLPStream s; if (pbft_chain_synced && last_block) { - s.appendList(3); - s << last_block; - s.appendRaw(data); - // Latest finalized block cert votes are saved in db as reward votes for new blocks const auto reward_votes = vote_mgr_->getRewardVotes(); assert(!reward_votes.empty()); - s.appendRaw(encodeVotesBundleRlp(reward_votes, false)); + // It is possible that the node pushed another block to the chain in the meantime + if (reward_votes[0]->getPeriod() == block_period) { + s.appendList(3); + s << last_block; + s.appendRaw(data); + s.appendRaw(encodeVotesBundleRlp(reward_votes, false)); + } else { + s.appendList(2); + s << last_block; + s.appendRaw(data); + } } else { s.appendList(2); s << last_block; From 59123bad9b1838a563ac38ef63e46e31f7b0c6e3 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Fri, 8 Dec 2023 09:50:37 +0100 Subject: [PATCH 30/72] fix: saving 2t+1 cert votes --- .../src/vote_manager/vote_manager.cpp | 52 ++++++++++--------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp index c3c991c625..b35eb37a96 100644 --- a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp +++ b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp @@ -30,8 +30,6 @@ 
VoteManager::VoteManager(const addr_t& node_addr, const PbftConfig& pbft_config, already_validated_votes_(1000000, 1000) { LOG_OBJECTS_CREATE("VOTE_MGR"); - auto db_votes = db_->getAllTwoTPlusOneVotes(); - auto addVerifiedVotes = [this](const std::vector>& votes, bool set_reward_votes_info = false) { bool rewards_info_already_set = false; for (const auto& vote : votes) { @@ -136,33 +134,37 @@ void VoteManager::setCurrentPbftPeriodAndRound(PbftPeriod pbft_period, PbftRound // a period or round that we are not yet in for (const auto& two_t_plus_one_voted_block : found_round_it->second.two_t_plus_one_voted_blocks_) { const TwoTPlusOneVotedBlockType two_t_plus_one_voted_block_type = two_t_plus_one_voted_block.first; - const auto& [two_t_plus_one_voted_block_hash, two_t_plus_one_voted_block_step] = two_t_plus_one_voted_block.second; + // 2t+1 cert voted blocks are only saved to the database in a db batch when block is pushed to the chain + if (two_t_plus_one_voted_block_type != TwoTPlusOneVotedBlockType::CertVotedBlock) { + const auto& [two_t_plus_one_voted_block_hash, two_t_plus_one_voted_block_step] = + two_t_plus_one_voted_block.second; + + const auto found_step_votes_it = found_round_it->second.step_votes.find(two_t_plus_one_voted_block_step); + if (found_step_votes_it == found_round_it->second.step_votes.end()) { + LOG(log_er_) << "Unable to find 2t+1 votes in verified_votes for period " << pbft_period << ", round " + << pbft_round << ", step " << two_t_plus_one_voted_block_step; + assert(false); + return; + } - const auto found_step_votes_it = found_round_it->second.step_votes.find(two_t_plus_one_voted_block_step); - if (found_step_votes_it == found_round_it->second.step_votes.end()) { - LOG(log_er_) << "Unable to find 2t+1 votes in verified_votes for period " << pbft_period << ", round " - << pbft_round << ", step " << two_t_plus_one_voted_block_step; - assert(false); - return; - } + // Find verified votes for specified block_hash based on found 2t+1 voted block 
of type "type" + const auto found_verified_votes_it = found_step_votes_it->second.votes.find(two_t_plus_one_voted_block_hash); + if (found_verified_votes_it == found_step_votes_it->second.votes.end()) { + LOG(log_er_) << "Unable to find 2t+1 votes in verified_votes for period " << pbft_period << ", round " + << pbft_round << ", step " << two_t_plus_one_voted_block_step << ", block hash " + << two_t_plus_one_voted_block_hash; + assert(false); + return; + } - // Find verified votes for specified block_hash based on found 2t+1 voted block of type "type" - const auto found_verified_votes_it = found_step_votes_it->second.votes.find(two_t_plus_one_voted_block_hash); - if (found_verified_votes_it == found_step_votes_it->second.votes.end()) { - LOG(log_er_) << "Unable to find 2t+1 votes in verified_votes for period " << pbft_period << ", round " - << pbft_round << ", step " << two_t_plus_one_voted_block_step << ", block hash " - << two_t_plus_one_voted_block_hash; - assert(false); - return; - } + std::vector> votes; + votes.reserve(found_verified_votes_it->second.second.size()); + for (const auto& vote : found_verified_votes_it->second.second) { + votes.push_back(vote.second); + } - std::vector> votes; - votes.reserve(found_verified_votes_it->second.second.size()); - for (const auto& vote : found_verified_votes_it->second.second) { - votes.push_back(vote.second); + db_->replaceTwoTPlusOneVotes(two_t_plus_one_voted_block_type, votes); } - - db_->replaceTwoTPlusOneVotes(two_t_plus_one_voted_block_type, votes); } } From 55a9a6ebbc6001d966d1713c17eec36b8435bd67 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Fri, 15 Dec 2023 10:36:02 +0100 Subject: [PATCH 31/72] chore: optimize dag block save --- .../src/transaction/transaction_manager.cpp | 20 ++++++++----------- libraries/core_libs/node/src/node.cpp | 2 ++ .../storage/include/storage/storage.hpp | 1 - libraries/core_libs/storage/src/storage.cpp | 11 ---------- tests/transaction_test.cpp | 6 +++--- 5 files changed, 13 
insertions(+), 27 deletions(-) diff --git a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp index 1a2d62d2b2..bc78695816 100644 --- a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp +++ b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp @@ -166,28 +166,24 @@ void TransactionManager::saveTransactionsFromDagBlock(SharedTransactions const & std::vector accepted_transactions; accepted_transactions.reserve(trxs.size()); auto write_batch = db_->createWriteBatch(); - vec_trx_t trx_hashes; - std::transform(trxs.begin(), trxs.end(), std::back_inserter(trx_hashes), - [](std::shared_ptr const &t) { return t->getHash(); }); - { - // This lock synchronizes inserting and removing transactions from transactions memory pool with database insertion. + // This lock synchronizes removing transactions from transactions memory pool with database insertion. // Unique lock here makes sure that transactions we are removing are not reinserted in transactions_pool_ std::unique_lock transactions_lock(transactions_mutex_); - auto trx_in_db = db_->transactionsInDb(trx_hashes); - for (uint64_t i = 0; i < trxs.size(); i++) { - auto const &trx_hash = trx_hashes[i]; + for (const auto &trx : trxs) { + auto const &trx_hash = trx->getHash(); // We only save transaction if it has not already been saved - if (!trx_in_db[i]) { - db_->addTransactionToBatch(*trxs[i], write_batch); - nonfinalized_transactions_in_dag_.emplace(trx_hash, trxs[i]); - } if (transactions_pool_.erase(trx_hash)) { LOG(log_dg_) << "Transaction " << trx_hash << " removed from trx pool "; // Transactions are counted when included in DAG trx_count_++; accepted_transactions.emplace_back(trx_hash); + if (nonfinalized_transactions_in_dag_.emplace(trx_hash, trx).second) { + db_->addTransactionToBatch(*trx, write_batch); + } else { + LOG(log_er_) << "Transaction " << trx_hash << " removed from transaction 
pool multiple times"; + } } } db_->addStatusFieldToBatch(StatusDbField::TrxCount, trx_count_, write_batch); diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index a95d68b421..054c285ec6 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -426,6 +426,8 @@ void FullNode::rebuildDb() { } stop_async = true; fut.wait(); + // Handles the race case if some blocks are still in the queue + pbft_mgr_->pushSyncedPbftBlocksIntoChain(); LOG(log_si_) << "Rebuild completed"; } diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index 7e66789733..ed2d84b803 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -238,7 +238,6 @@ class DbStorage : public std::enable_shared_from_this { SharedTransactions getAllNonfinalizedTransactions(); bool transactionInDb(trx_hash_t const& hash); bool transactionFinalized(trx_hash_t const& hash); - std::vector transactionsInDb(std::vector const& trx_hashes); std::vector transactionsFinalized(std::vector const& trx_hashes); void addTransactionToBatch(Transaction const& trx, Batch& write_batch); void removeTransactionToBatch(trx_hash_t const& trx, Batch& write_batch); diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index 048290e626..348348b244 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -871,17 +871,6 @@ bool DbStorage::transactionFinalized(trx_hash_t const& hash) { return exist(toSlice(hash.asBytes()), Columns::trx_period); } -std::vector DbStorage::transactionsInDb(std::vector const& trx_hashes) { - std::vector result(trx_hashes.size(), false); - for (size_t i = 0; i < trx_hashes.size(); ++i) { - const auto key = trx_hashes[i].asBytes(); - if (exist(toSlice(key), Columns::transactions) || 
exist(toSlice(key), Columns::trx_period)) { - result[i] = true; - } - } - return result; -} - uint64_t DbStorage::getStatusField(StatusDbField const& field) { auto status = lookup(toSlice((uint8_t)field), Columns::status); if (!status.empty()) { diff --git a/tests/transaction_test.cpp b/tests/transaction_test.cpp index 53637d89a0..4d6bd8e47f 100644 --- a/tests/transaction_test.cpp +++ b/tests/transaction_test.cpp @@ -258,10 +258,10 @@ TEST_F(TransactionTest, transaction_concurrency) { TransactionManager trx_mgr(cfg, db, NewFinalChain(db, cfg), addr_t()); bool stopped = false; // Insert transactions to memory pool and keep trying to insert them again on separate thread, it should always fail + for (auto const& t : *g_signed_trx_samples) { + trx_mgr.insertTransaction(t); + } std::thread insertTrx([&trx_mgr, &stopped]() { - for (auto const& t : *g_signed_trx_samples) { - trx_mgr.insertTransaction(t); - } while (!stopped) { for (auto const& t : *g_signed_trx_samples) { EXPECT_FALSE(trx_mgr.insertTransaction(t).first); From 177857aa3bf442e34c8c394e964be45341a7bb56 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Tue, 19 Dec 2023 09:42:09 +0100 Subject: [PATCH 32/72] chore: update genesis hash for devnet reset --- .../cli/include/cli/config_jsons/devnet/devnet_genesis.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json index 5a0d4441ae..e8a9fe815c 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json @@ -4,7 +4,7 @@ "level": "0x0", "pivot": "0x0000000000000000000000000000000000000000000000000000000000000000", "sig": "0xb7e22d46c1ba94d5e8347b01d137b5c428fcbbeaf0a77fb024cbbf1517656ff00d04f7f25be608c321b0d7483c402c294ff46c49b265305d046a52236c0a363701", - "timestamp": "0x652E75C2", + "timestamp": "0x652E75C3", "tips": [], 
"transactions": [] }, From c2ae8ae947cc94b4279e2beac97be79b8f57df3f Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 19 Dec 2023 11:31:12 +0100 Subject: [PATCH 33/72] chore: fix local-net --- for_devs/README.md | 20 +- for_devs/local-net | 126 +++++----- for_devs/requirements.txt | 233 +++++++++++++++--- .../config_jsons/default/default_config.json | 8 - .../config_jsons/devnet/devnet_config.json | 8 - libraries/cli/src/tools.cpp | 10 + 6 files changed, 287 insertions(+), 118 deletions(-) diff --git a/for_devs/README.md b/for_devs/README.md index 0d5e7aa9e6..9b80f92b25 100644 --- a/for_devs/README.md +++ b/for_devs/README.md @@ -17,7 +17,7 @@ Usage: local-net start [OPTIONS] BINARY Start a local testnet Options: - --boot-nodes INTEGER Number of boot nodes + --consensus-nodes INTEGER Number of consensus nodes --rpc-nodes INTEGER Number of RPC nodes --tps INTEGER Number of transactions per second (if zero the faucet will not start) @@ -27,25 +27,25 @@ Options: The script can be run from anywhere but keep in mind that it will create a new directory called `local-net-data` that contains the data and config files for the nodes in the current path. -For example if you want to test the new binary on a network with 3 boot nodes and 1 RPC node you can run the following command in the root of the current repo: +For example if you want to test the new binary on a network with 3 consensus nodes and 1 RPC node you can run the following command in the root of the current repo: ```bash -./for_devs/local-net start --boot-nodes 3 --rpc-nodes 1 --tps 1 ./cmake-build/bin/taraxad +./for_devs/local-net start --consensus-nodes 3 --rpc-nodes 1 --tps 1 ./cmake-build/bin/taraxad ``` Network can be stopped, config files for nodes adjusted and redeployed... 
It can be used for debugging, for example: -- Deploy new network with 3 boot nodes and 1 rpc node: +- Deploy new network with 3 consensus nodes and 1 rpc node: ```bash -./for_devs/local-net start --boot-nodes 3 --rpc-nodes 1 --tps 1 ./cmake-build/bin/taraxad +./for_devs/local-net start --consensus-nodes 3 --rpc-nodes 1 --tps 1 ./cmake-build/bin/taraxad ``` -- Let it run for 1 minute so boot nodes create a few pbft blocks. Then stop the network, increase number of rpc nodes(let's say to 5) and redeploy the networK: +- Let it run for 1 minute so consensus nodes create a few pbft blocks. Then stop the network, increase number of rpc nodes(let's say to 5) and redeploy the networK: ```bash -./for_devs/local-net start --boot-nodes 3 --rpc-nodes 5 --tps 1 ./cmake-build/bin/taraxad +./for_devs/local-net start --consensus-nodes 3 --rpc-nodes 5 --tps 1 ./cmake-build/bin/taraxad ``` -New rpc nodes start syncing with original nodes as they are behind with pbft and we can debug this process. Network can be stopped at any time, -config files adjusted and redeployed with the same command. +New rpc nodes start syncing with original nodes as they are behind with pbft and we can debug this process. Network can be stopped at any time, +config files adjusted and redeployed with the same command. -!!! Note: For existing network only rpc nodes number can be increased, in case you want to increase boot nodes number, network must deployed from scratch. \ No newline at end of file +!!! Note: For existing network only rpc nodes number can be increased, in case you want to increase consensus nodes number, network must deployed from scratch. 
\ No newline at end of file diff --git a/for_devs/local-net b/for_devs/local-net index 2cfd1c57f2..8add8bec84 100755 --- a/for_devs/local-net +++ b/for_devs/local-net @@ -1,4 +1,4 @@ -#!/usr/bin/env python3.9 +#!/usr/bin/env python3 import click import subprocess @@ -28,7 +28,7 @@ worker_colors = {} chain_id = 0 faucet_private_key = '' faucet_public_address = '' -boot_nodes_public_addresses = {} +consensus_nodes_public_addresses = {} data_dir = 'local-net-data' if not os.path.isdir(f'./{data_dir}'): @@ -36,47 +36,63 @@ if not os.path.isdir(f'./{data_dir}'): local_net = os.path.realpath(f'./{data_dir}') - @click.group() def cli(): - """Local Net CLI""" + """Local Net CLI - A tool for managing and simulating a local blockchain testnet.""" pass - @cli.command() -@click.option('--boot-nodes', default=3, help='Number of boot nodes') -@click.option('--rpc-nodes', default=3, help='Number of RPC nodes') -@click.option('--tps', default=0, help='Number of transactions per second (if zero the faucet will not start)') -@click.option('--enable-test-rpc', is_flag=True, default=False, help='Enables Test JsonRPC') +@click.option('--consensus-nodes', default=3, show_default=True, + help='Number of consensus nodes to initialize. This determines the number of nodes that will participate in consensus processes.') +@click.option('--rpc-nodes', default=3, show_default=True, + help='Number of RPC nodes to initialize. These nodes provide a remote procedure call interface.') +@click.option('--tps', default=0, show_default=True, + help='Transactions per second for the faucet to simulate. 
If set to zero, the faucet will not start.') +@click.option('--enable-test-rpc', is_flag=True, default=False, show_default=True, + help='Flag to enable Test JsonRPC endpoints, useful for testing and debugging purposes.') @click.argument('binary') -def start(boot_nodes, rpc_nodes, tps, enable_test_rpc, binary): - """Start a local testnet""" + +def start(consensus_nodes, rpc_nodes, tps, enable_test_rpc, binary): + """ + Start a local testnet with configurable consensus and RPC nodes. + + This command initializes a local blockchain testnet environment, allowing you + to simulate blockchain operations and test various configurations. + + Example: + Start a testnet with default settings: + $ local-net start --binary /path/to/binary + """ + # Validate the binary path + if not os.path.exists(binary): + raise click.BadParameter(f"The specified binary path does not exist: {binary}") + binary = os.path.realpath(binary) - config(binary, boot_nodes, rpc_nodes) - start_workers(binary, boot_nodes, rpc_nodes, tps, enable_test_rpc) + config(binary, consensus_nodes, rpc_nodes) + start_workers(binary, consensus_nodes, rpc_nodes, tps, enable_test_rpc) -def config(binary, boot_nodes, rpc_nodes): - global boot_nodes_public_addresses +def config(binary, consensus_nodes, rpc_nodes): + global consensus_nodes_public_addresses - boot_nodes_flags = [] + consensus_nodes_flags = [] private_keys = {} vrf_secrets = {} - for i in range(boot_nodes): - worker_name = f'boot-node-{i}' - wallet_path = f'wallet-boot-node-{i}.json' + for i in range(consensus_nodes): + worker_name = f'consensus-node-{i}' + wallet_path = f'wallet-consensus-node-{i}.json' (private_key, public_key, public_address, vrf_secret, vrf_public) = generate_node_wallet(binary, wallet_path) private_keys[worker_name] = private_key vrf_secrets[worker_name] = vrf_secret - boot_nodes_public_addresses[worker_name] = [public_address, vrf_public] + consensus_nodes_public_addresses[worker_name] = [public_address, vrf_public] - 
boot_nodes_flags.append( + consensus_nodes_flags.append( f'127.0.0.1:10{i+1}02/{public_key}') - boot_nodes_flags = " ".join(boot_nodes_flags) + consensus_nodes_flags = " ".join(consensus_nodes_flags) generate_faucet_wallet(binary) @@ -85,15 +101,15 @@ def config(binary, boot_nodes, rpc_nodes): existing_genesis = None existing_genesis_node = None - for type in ("boot", "rpc"): - if type == "boot": - nodes = boot_nodes + for type in ("consensus", "rpc"): + if type == "consensus": + nodes = consensus_nodes else: nodes = rpc_nodes for i in range(nodes): - if type == "boot": - worker_name = f'boot-node-{i}' + if type == "consensus": + worker_name = f'consensus-node-{i}' else: worker_name = f'rpc-node-{i}' @@ -118,22 +134,22 @@ def config(binary, boot_nodes, rpc_nodes): timestamp = tmp_genesis['dag_genesis_block']['timestamp'] continue - if type == "boot" and existing_genesis: - sys.exit("Cannot increase number of boot nodes without resetting the network") + if type == "consensus" and existing_genesis: + sys.exit("Cannot increase number of consensus nodes without resetting the network") if not os.path.isdir(f'{local_net}/db-{worker_name}'): os.mkdir(f'{local_net}/db-{worker_name}') cmd = '' - cmd += f'{binary} --command config --chain-id {chain_id} ' - cmd += f'--boot-nodes {boot_nodes_flags} ' + cmd += f'{binary} --overwrite-config --command config --chain-id {chain_id} ' + cmd += f'--boot-nodes {consensus_nodes_flags} ' - if type == "boot": - # cmd += '--boot-node ' // TODO: we should replace this with bootnode binary? + if type == "consensus": + # cmd += '--consensus-node ' // TODO: we should replace this with consensusnode binary? 
cmd += f'--node-secret {private_keys[worker_name]} ' cmd += f'--vrf-secret {vrf_secrets[worker_name]} ' - if type == "boot": + if type == "consensus": port_prefix = '0' else: port_prefix = '1' @@ -159,9 +175,9 @@ def config(binary, boot_nodes, rpc_nodes): else: default_validator = genesis['dpos']['initial_validators'][0] genesis['dpos']['initial_validators'] = [] - print("boot_nodes_public_addresses: ",boot_nodes_public_addresses) - for key in boot_nodes_public_addresses: - address, vrf_key = boot_nodes_public_addresses[key] + print("consensus_nodes_public_addresses: ",consensus_nodes_public_addresses) + for key in consensus_nodes_public_addresses: + address, vrf_key = consensus_nodes_public_addresses[key] default_validator["address"] = address default_validator["owner"] = address default_validator["vrf_key"] = vrf_key @@ -248,17 +264,17 @@ def generate_wallet(binary): return (private_key, public_key, public_address, vrf_secret, vrf_key) -def start_workers(binary, boot_nodes, rpc_nodes, tps, enable_test_rpc): +def start_workers(binary, consensus_nodes, rpc_nodes, tps, enable_test_rpc): c = list(colors.keys()) - for type in ("boot", "rpc"): - if type == "boot": - nodes = boot_nodes + for type in ("consensus", "rpc"): + if type == "consensus": + nodes = consensus_nodes else: nodes = rpc_nodes for i in range(nodes): - if type == "boot": - worker_name = f'boot-node-{i}' + if type == "consensus": + worker_name = f'consensus-node-{i}' else: worker_name = f'rpc-node-{i}' @@ -296,21 +312,21 @@ def faucet_worker(tps): # Wait for the nodes to start time.sleep(10) web3 = Web3(Web3.HTTPProvider('http://127.0.0.1:7017')) - nonce = web3.eth.getTransactionCount( - Web3.toChecksumAddress(faucet_public_address)) + nonce = web3.eth.get_transaction_count( + Web3.to_checksum_address(faucet_public_address)) - boot_nodes = list(boot_nodes_public_addresses.keys()) + consensus_nodes = list(consensus_nodes_public_addresses.keys()) while True: time.sleep(1/tps) - to, _ = 
boot_nodes_public_addresses[boot_nodes[randint( - 0, len(boot_nodes)-1)]] + to, _ = consensus_nodes_public_addresses[consensus_nodes[randint( + 0, len(consensus_nodes)-1)]] tx = { 'nonce': nonce, - 'to': Web3.toChecksumAddress(to), - 'value': web3.toWei(100000000, 'gwei'), + 'to': Web3.to_checksum_address(to), + 'value': web3.to_wei(100000000, 'gwei'), 'gas': 21000, - 'gasPrice': web3.toWei(1, 'gwei'), + 'gasPrice': web3.to_wei(1, 'gwei'), 'chainId': int(chain_id) } nonce = nonce + 1 @@ -320,11 +336,11 @@ def faucet_worker(tps): t = time.strftime('%Y-%m-%d %H:%M:%S', current_time) try: - tx_hash = web3.eth.sendRawTransaction(signed_tx.rawTransaction) + tx_hash = web3.eth.send_raw_transaction(signed_tx.rawTransaction) log_format( - 'faucet', f'{t} Dripped to {to}, tx_hash: {web3.toHex(tx_hash)}') - except: - log_format('faucet', f'{t} Failed to drip to {to}') + 'faucet', f'{t} Dripped to {to}, tx_hash: {web3.to_hex(tx_hash)}') + except Exception as e: + log_format('faucet', f'{t} Failed to drip to {to}. 
Error: {str(e)}') pass diff --git a/for_devs/requirements.txt b/for_devs/requirements.txt index ded956100f..245604ed05 100644 --- a/for_devs/requirements.txt +++ b/for_devs/requirements.txt @@ -1,39 +1,198 @@ -aiohttp==3.9.0 -async-timeout==3.0.1 -attrs==21.2.0 -base58==2.1.0 -bitarray==1.2.2 -certifi==2023.7.22 -chardet==4.0.0 -click==8.0.1 -cytoolz==0.11.0 -eth-abi==4.2.0 -eth-account==0.5.9 -eth-hash==0.3.1 -eth-keyfile==0.5.1 -eth-keys==0.3.4 -eth-rlp==0.2.1 -eth-typing==2.2.2 -eth-utils==1.10.0 -hexbytes==0.2.1 -idna==2.10 -ipfshttpclient==0.7.0 -jsonschema==3.2.0 -lru-dict==1.1.7 -multiaddr==0.0.9 -multidict==5.1.0 -netaddr==0.8.0 -parsimonious==0.8.1 -protobuf==3.18.3 -pycryptodome==3.10.1 -pyrsistent==0.17.3 +aiohttp==3.9.1 +aiosignal==1.3.1 +anyio==4.0.0 +apt-xapian-index==0.49 +argcomplete==2.0.0 +arrow==1.3.0 +asn1crypto==1.5.1 +attrs==23.1.0 +Babel==2.10.3 +bcrypt==4.0.1 +binaryornot==0.4.4 +bitarray==2.9.0 +blinker==1.6.2 +boto3==1.28.68 +botocore==1.31.68 +bottle==0.12.25 +Brotli==1.0.9 +build==0.10.0 +CacheControl==0.13.1 +certifi==2022.9.24 +cffi==1.16.0 +chardet==5.1.0 +cleo==2.0.1 +click==8.1.6 +click-configfile==0.2.3 +colorama==0.4.6 +command-not-found==0.3 +conan==1.59.0 +configparser==6.0.0 +cookiecutter==2.4.0 +crashtest==0.4.1 +cryptography==38.0.4 +cupshelpers==1.0 +cytoolz==0.12.2 +dbus-python==1.3.2 +defer==1.0.6 +distlib==0.3.7 +distro==1.8.0 +distro-info==1.5 +docker==5.0.3 +docker-compose==1.29.2 +dockerpty==0.4.1 +docopt==0.6.2 +dulwich==0.21.6 +eth-abi==4.2.1 +eth-account==0.10.0 +eth-hash==0.5.2 +eth-keyfile==0.7.0 +eth-keys==0.4.0 +eth-rlp==1.0.0 +eth-typing==3.5.2 +eth-utils==2.3.1 +fabric==2.7.1 +fasteners==0.19 +fastimport==0.9.14 +filelock==3.12.2 +frozenlist==1.4.1 +fuse-python==1.0.5 +ghp-import==2.1.0 +gpg==1.18.0 +gyp==0.1 +h11==0.14.0 +hamlet==10.0.1 +hexbytes==0.3.1 +html5lib==1.1 +httpcore==0.18.0 +httplib2==0.20.4 +httpx==0.25.0 +idna==3.3 +importlib-metadata==4.12.0 +importlib-resources==5.13.0 +iniconfig==1.1.1 
+installer==0.7.0 +invoke==1.7.3 +jaraco.classes==3.2.1 +jeepney==0.8.0 +Jinja2==3.1.2 +jmespath==0.10.0 +joblib==1.2.0 +jsonpointer==2.0 +jsonschema==4.10.3 +keyring==24.2.0 +language-selector==0.1 +launchpadlib==1.11.0 +lazr.restfulclient==0.14.5 +lazr.uri==1.0.6 +livereload==2.6.3 +lockfile==0.12.2 +lru-dict==1.2.0 +lunr==0.6.2 +Markdown==3.4.4 +markdown-it-py==3.0.0 +MarkupSafe==2.1.3 +marshmallow==3.20.1 +mat==0.13.4 +mdurl==0.1.2 +mechanize==0.4.8 +mergedeep==1.3.4 +mkdocs==1.4.2 +more-itertools==10.1.0 +msgpack==1.0.3 +multidict==6.0.4 +mutagen==1.46.0 +netifaces==0.11.0 +nltk==3.8.1 +node-semver==0.6.1 +numpy==1.24.2 +oauthlib==3.2.2 +olefile==0.46 +packaging==23.1 +paramiko==3.3.1 +parsimonious==0.9.0 +patch-ng==1.17.4 +pathlib2==2.3.7.post1 +pexpect==4.8.0 +Pillow==10.0.0 +pipx==1.2.0 +pkginfo==1.8.2 +platformdirs==3.10.0 +pluggy==1.2.0 +pluginbase==1.0.1 +poetry==1.6.1 +poetry-core==1.6.1 +protobuf==4.25.1 +psutil==5.9.4 +ptyprocess==0.7.0 +pycairo==1.24.0 +pycparser==2.21 +pycryptodome==3.19.0 +pycryptodomex==3.11.0 +pycups==2.0.1 +Pygments==2.15.1 +PyGObject==3.46.0 +pyinotify==0.9.6 +PyJWT==2.7.0 +pylev==1.4.0 +pylibacl==0.7.0 +PyNaCl==1.5.0 +pyparsing==3.1.0 +pyproject_hooks==1.0.0 +PyQt5==5.15.9 +PyQt5-sip==12.12.2 +pyrsistent==0.18.1 +pytest==6.2.5 +python-apt==2.6.0+ubuntu1 +python-dateutil==2.8.2 +python-debian==0.1.49+ubuntu2 +python-dotenv==1.0.0 +python-slugify==8.0.1 +pytz==2023.3 +pyunormalize==15.1.0 +pyxattr==0.8.1 +PyYAML==6.0 +pyyaml_env_tag==0.1 +regex==2022.10.31 requests==2.31.0 -rlp==2.0.1 +requests-toolbelt==1.0.0 +rich==13.3.1 +rlp==4.0.0 +s3transfer==0.7.0 +SecretStorage==3.3.3 +semver==2.13.0 +shellingham==1.5.1 +simple-term-menu==1.6.1 +simplejson==3.19.1 six==1.16.0 -toolz==0.11.1 -typing-extensions==3.10.0.0 -urllib3==1.26.18 -varint==1.0.2 -web3==5.20.0 -websockets==8.1 -yarl==1.6.3 +sniffio==1.3.0 +systemd-python==235 +tabulate==0.9.0 +text-unidecode==1.3 +texttable==1.6.7 +toml==0.10.2 +tomlkit==0.12.1 +toolz==0.12.0 
+tornado==6.3.2 +tqdm==4.64.1 +trove-classifiers==2023.7.6 +types-python-dateutil==2.8.19.14 +typing_extensions==4.9.0 +ubuntu-advantage-tools==8001 +ubuntu-drivers-common==0.0.0 +ufw==0.36.2 +unattended-upgrades==0.1 +urllib3==1.26.16 +usb-creator==0.3.16 +userpath==1.9.0 +virtualenv==20.24.1+ds +wadllib==1.3.6 +watchdog==3.0.0 +web3==6.12.0 +webencodings==0.5.1 +websocket-client==1.2.3 +websockets==10.4 +www-authenticate==0.9.2 +xkit==0.0.0 +yarl==1.9.4 +yt-dlp==2023.7.6 +zipp==1.0.0 diff --git a/libraries/cli/include/cli/config_jsons/default/default_config.json b/libraries/cli/include/cli/config_jsons/default/default_config.json index 35c31d9ef6..72e9409c3f 100644 --- a/libraries/cli/include/cli/config_jsons/default/default_config.json +++ b/libraries/cli/include/cli/config_jsons/default/default_config.json @@ -52,14 +52,6 @@ { "name": "SUMMARY", "verbosity": "INFO" - }, - { - "name": "PBFT_MGR", - "verbosity": "INFO" - }, - { - "name": "DAG_PROPOSER", - "verbosity": "INFO" } ], "outputs": [ diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json index 9afbc21dc1..47d0713725 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json @@ -67,14 +67,6 @@ { "name": "SUMMARY", "verbosity": "INFO" - }, - { - "name": "PBFT_MGR", - "verbosity": "INFO" - }, - { - "name": "DAG_PROPOSER", - "verbosity": "INFO" } ], "outputs": [ diff --git a/libraries/cli/src/tools.cpp b/libraries/cli/src/tools.cpp index cdfea7be4f..c92df519b3 100644 --- a/libraries/cli/src/tools.cpp +++ b/libraries/cli/src/tools.cpp @@ -86,6 +86,16 @@ Json::Value overrideConfig(Json::Value& conf, std::string& data_dir, const vecto // Override boot nodes if (boot_nodes.size() > 0) { conf["network"]["boot_nodes"] = Json::Value(Json::arrayValue); + for (auto const& b : boot_nodes) { + vector result; + boost::split(result, b, 
boost::is_any_of(":/")); + if (result.size() != 3) throw invalid_argument("Boot node in boot_nodes not specified correctly"); + Json::Value b_node; + b_node["id"] = result[2]; + b_node["ip"] = result[0]; + b_node["port"] = stoi(result[1]); + conf["network"]["boot_nodes"].append(b_node); + } } if (boot_nodes_append.size() > 0) { for (auto const& b : boot_nodes_append) { From ba3af43ec14a19372324da02a8ae1c784984f4cb Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Tue, 19 Dec 2023 13:40:35 +0100 Subject: [PATCH 34/72] fix: transaction pool --- .../core_libs/consensus/src/transaction/transaction_queue.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/libraries/core_libs/consensus/src/transaction/transaction_queue.cpp b/libraries/core_libs/consensus/src/transaction/transaction_queue.cpp index c0f0b12fe9..d16a06f835 100644 --- a/libraries/core_libs/consensus/src/transaction/transaction_queue.cpp +++ b/libraries/core_libs/consensus/src/transaction/transaction_queue.cpp @@ -79,7 +79,9 @@ SharedTransactions TransactionQueue::getAllTransactions() const { bool TransactionQueue::erase(const trx_hash_t &hash) { // Find the hash const auto it = queue_transactions_.find(hash); - if (it == queue_transactions_.end()) return false; + if (it == queue_transactions_.end()) { + return non_proposable_transactions_.erase(hash) > 0; + } const auto &account_it = account_nonce_transactions_.find(it->second->getSender()); assert(account_it != account_nonce_transactions_.end()); From cc827b4145dc8c5e138ae6262418df32cb45097f Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 19 Dec 2023 15:10:32 +0100 Subject: [PATCH 35/72] chore: remove old tarcap --- .../get_next_votes_bundle_packet_handler.hpp | 16 -- .../v1/get_pbft_sync_packet_handler.hpp | 17 -- .../v1/init_packets_handlers.hpp | 68 -------- .../v1/pbft_sync_packet_handler.hpp | 17 -- .../v1/votes_bundle_packet_handler.hpp | 20 --- libraries/core_libs/network/src/network.cpp | 15 +- .../packets_handlers/{v1 => 
latest}/readme.md | 0 .../get_next_votes_bundle_packet_handler.cpp | 35 ---- .../v1/get_pbft_sync_packet_handler.cpp | 76 --------- .../v1/pbft_sync_packet_handler.cpp | 41 ----- .../v1/votes_bundle_packet_handler.cpp | 156 ------------------ 11 files changed, 4 insertions(+), 457 deletions(-) delete mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/get_next_votes_bundle_packet_handler.hpp delete mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/get_pbft_sync_packet_handler.hpp delete mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/init_packets_handlers.hpp delete mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/pbft_sync_packet_handler.hpp delete mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/votes_bundle_packet_handler.hpp rename libraries/core_libs/network/src/tarcap/packets_handlers/{v1 => latest}/readme.md (100%) delete mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v1/get_next_votes_bundle_packet_handler.cpp delete mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v1/get_pbft_sync_packet_handler.cpp delete mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v1/pbft_sync_packet_handler.cpp delete mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v1/votes_bundle_packet_handler.cpp diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/get_next_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/get_next_votes_bundle_packet_handler.hpp deleted file mode 100644 index 7208ab6ec3..0000000000 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/get_next_votes_bundle_packet_handler.hpp +++ /dev/null @@ -1,16 +0,0 @@ -#pragma once - -#include "network/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.hpp" - 
-namespace taraxa::network::tarcap::v1 { - -// V1 packets handlers must be derived from latest packets handlers otherwise network class might not work properly ! -class GetNextVotesBundlePacketHandler final : public tarcap::GetNextVotesBundlePacketHandler { - public: - using tarcap::GetNextVotesBundlePacketHandler::GetNextVotesBundlePacketHandler; - - void sendPbftVotesBundle(const std::shared_ptr& peer, - std::vector>&& votes) override; -}; - -} // namespace taraxa::network::tarcap::v1 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/get_pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/get_pbft_sync_packet_handler.hpp deleted file mode 100644 index a6d895439e..0000000000 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/get_pbft_sync_packet_handler.hpp +++ /dev/null @@ -1,17 +0,0 @@ -#pragma once - -#include "network/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.hpp" - -namespace taraxa::network::tarcap::v1 { - -// V1 packets handlers must be derived from latest packets handlers otherwise network class might not work properly ! 
-class GetPbftSyncPacketHandler final : public tarcap::GetPbftSyncPacketHandler { - public: - using tarcap::GetPbftSyncPacketHandler::GetPbftSyncPacketHandler; - - private: - void sendPbftBlocks(const std::shared_ptr& peer, PbftPeriod from_period, size_t blocks_to_transfer, - bool pbft_chain_synced) override; -}; - -} // namespace taraxa::network::tarcap::v1 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/init_packets_handlers.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/init_packets_handlers.hpp deleted file mode 100644 index 802277bf89..0000000000 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/init_packets_handlers.hpp +++ /dev/null @@ -1,68 +0,0 @@ -#pragma once - -#include "get_next_votes_bundle_packet_handler.hpp" -#include "get_pbft_sync_packet_handler.hpp" -#include "network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp" -#include "network/tarcap/packets_handlers/latest/dag_sync_packet_handler.hpp" -#include "network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp" -#include "network/tarcap/packets_handlers/latest/status_packet_handler.hpp" -#include "network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp" -#include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" -#include "network/tarcap/taraxa_capability.hpp" -#include "pbft_sync_packet_handler.hpp" -#include "slashing_manager/slashing_manager.hpp" -#include "votes_bundle_packet_handler.hpp" - -namespace taraxa::network::tarcap::v1 { - -/** - * @brief Taraxa capability V1 InitPacketsHandlers function definition - */ -static const TaraxaCapability::InitPacketsHandlers kInitV1Handlers = - [](const std::string &logs_prefix, const FullNodeConfig &config, const h256 &genesis_hash, - const std::shared_ptr &peers_state, const std::shared_ptr &pbft_syncing_state, - const std::shared_ptr &packets_stats, const std::shared_ptr &db, - const std::shared_ptr 
&pbft_mgr, const std::shared_ptr &pbft_chain, - const std::shared_ptr &vote_mgr, const std::shared_ptr &dag_mgr, - const std::shared_ptr &trx_mgr, const std::shared_ptr &slashing_manager, - const addr_t &node_addr) { - auto packets_handlers = std::make_shared(); - - // Consensus packets with high processing priority - packets_handlers->registerHandler( - config, peers_state, packets_stats, pbft_mgr, pbft_chain, vote_mgr, slashing_manager, node_addr, logs_prefix); - packets_handlers->registerHandler( - config, peers_state, packets_stats, pbft_mgr, pbft_chain, vote_mgr, slashing_manager, node_addr, logs_prefix); - packets_handlers->registerHandler( - config, peers_state, packets_stats, pbft_mgr, pbft_chain, vote_mgr, slashing_manager, node_addr, logs_prefix); - - // Standard packets with mid processing priority - packets_handlers->registerHandler(config, peers_state, packets_stats, - pbft_syncing_state, pbft_chain, pbft_mgr, - dag_mgr, trx_mgr, db, node_addr, logs_prefix); - - packets_handlers->registerHandler(config, peers_state, packets_stats, trx_mgr, - node_addr, logs_prefix); - - // Non critical packets with low processing priority - packets_handlers->registerHandler(config, peers_state, packets_stats, - pbft_syncing_state, pbft_chain, pbft_mgr, dag_mgr, - db, genesis_hash, node_addr, logs_prefix); - packets_handlers->registerHandler(config, peers_state, packets_stats, trx_mgr, - dag_mgr, db, node_addr, logs_prefix); - - packets_handlers->registerHandler(config, peers_state, packets_stats, - pbft_syncing_state, pbft_chain, pbft_mgr, dag_mgr, - trx_mgr, db, node_addr, logs_prefix); - - packets_handlers->registerHandler( - config, peers_state, packets_stats, pbft_syncing_state, pbft_chain, vote_mgr, db, node_addr, logs_prefix); - - packets_handlers->registerHandler( - config, peers_state, packets_stats, pbft_syncing_state, pbft_chain, pbft_mgr, dag_mgr, vote_mgr, db, - node_addr, logs_prefix); - - return packets_handlers; - }; - -} // namespace 
taraxa::network::tarcap::v1 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/pbft_sync_packet_handler.hpp deleted file mode 100644 index f698d9b7ab..0000000000 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/pbft_sync_packet_handler.hpp +++ /dev/null @@ -1,17 +0,0 @@ -#pragma once - -#include "network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp" - -namespace taraxa::network::tarcap::v1 { - -// V1 packets handlers must be derived from latest packets handlers otherwise network class might not work properly ! -class PbftSyncPacketHandler final : public tarcap::PbftSyncPacketHandler { - public: - using tarcap::PbftSyncPacketHandler::PbftSyncPacketHandler; - - protected: - PeriodData decodePeriodData(const dev::RLP& period_data_rlp) const override; - std::vector> decodeVotesBundle(const dev::RLP& votes_bundle_rlp) const override; -}; - -} // namespace taraxa::network::tarcap::v1 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/votes_bundle_packet_handler.hpp deleted file mode 100644 index d4614fff02..0000000000 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v1/votes_bundle_packet_handler.hpp +++ /dev/null @@ -1,20 +0,0 @@ -#pragma once - -#include "network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp" - -namespace taraxa::network::tarcap::v1 { - -// V1 packets handlers must be derived from latest packets handlers otherwise network class might not work properly ! 
-class VotesBundlePacketHandler final : public tarcap::VotesBundlePacketHandler { - public: - using tarcap::VotesBundlePacketHandler::VotesBundlePacketHandler; - - void sendPbftVotesBundle(const std::shared_ptr& peer, - std::vector>&& votes) override; - - private: - void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; -}; - -} // namespace taraxa::network::tarcap::v1 diff --git a/libraries/core_libs/network/src/network.cpp b/libraries/core_libs/network/src/network.cpp index a12cede4d6..b5ecbed955 100644 --- a/libraries/core_libs/network/src/network.cpp +++ b/libraries/core_libs/network/src/network.cpp @@ -8,9 +8,12 @@ #include #include "config/version.hpp" +#include "network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp" +#include "network/tarcap/packets_handlers/latest/status_packet_handler.hpp" +#include "network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp" +#include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp" -#include "network/tarcap/packets_handlers/v1/init_packets_handlers.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "network/tarcap/stats/node_stats.hpp" #include "network/tarcap/stats/time_period_packets_stats.hpp" @@ -67,18 +70,8 @@ Network::Network(const FullNodeConfig &config, const h256 &genesis_hash, std::fi // Create taraxa capabilities dev::p2p::Host::CapabilitiesFactory constructCapabilities = [&](std::weak_ptr host) { assert(!host.expired()); - - const size_t kV1NetworkVersion = 1; - assert(kV1NetworkVersion < TARAXA_NET_VERSION); - dev::p2p::Host::CapabilityList capabilities; - // Register old version (V1) of taraxa capability - auto v1_tarcap = std::make_shared( - 
kV1NetworkVersion, config, genesis_hash, host, key, packets_tp_, all_packets_stats_, pbft_syncing_state_, db, - pbft_mgr, pbft_chain, vote_mgr, dag_mgr, trx_mgr, slashing_manager, network::tarcap::v1::kInitV1Handlers); - capabilities.emplace_back(v1_tarcap); - // Register latest version of taraxa capability auto latest_tarcap = std::make_shared( TARAXA_NET_VERSION, config, genesis_hash, host, key, packets_tp_, all_packets_stats_, pbft_syncing_state_, db, diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v1/readme.md b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/readme.md similarity index 100% rename from libraries/core_libs/network/src/tarcap/packets_handlers/v1/readme.md rename to libraries/core_libs/network/src/tarcap/packets_handlers/latest/readme.md diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v1/get_next_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v1/get_next_votes_bundle_packet_handler.cpp deleted file mode 100644 index dc6be20eed..0000000000 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v1/get_next_votes_bundle_packet_handler.cpp +++ /dev/null @@ -1,35 +0,0 @@ -#include "network/tarcap/packets_handlers/v1/get_next_votes_bundle_packet_handler.hpp" - -#include "pbft/pbft_manager.hpp" -#include "vote_manager/vote_manager.hpp" - -namespace taraxa::network::tarcap::v1 { - -void GetNextVotesBundlePacketHandler::sendPbftVotesBundle(const std::shared_ptr &peer, - std::vector> &&votes) { - if (votes.empty()) { - return; - } - - size_t index = 0; - while (index < votes.size()) { - const size_t count = std::min(static_cast(kMaxVotesInBundleRlp), votes.size() - index); - dev::RLPStream s(count); - for (auto i = index; i < index + count; i++) { - const auto &vote = votes[i]; - s.appendRaw(vote->rlp(true, false)); - LOG(log_dg_) << "Send vote " << vote->getHash() << " to peer " << peer->getId(); - } - - if (sealAndSend(peer->getId(), 
SubprotocolPacketType::VotesBundlePacket, std::move(s))) { - LOG(log_dg_) << count << " PBFT votes to were sent to " << peer->getId(); - for (auto i = index; i < index + count; i++) { - peer->markVoteAsKnown(votes[i]->getHash()); - } - } - - index += count; - } -} - -} // namespace taraxa::network::tarcap::v1 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v1/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v1/get_pbft_sync_packet_handler.cpp deleted file mode 100644 index ba31e44e43..0000000000 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v1/get_pbft_sync_packet_handler.cpp +++ /dev/null @@ -1,76 +0,0 @@ -#include "network/tarcap/packets_handlers/v1/get_pbft_sync_packet_handler.hpp" - -#include "network/tarcap/shared_states/pbft_syncing_state.hpp" -#include "pbft/pbft_chain.hpp" -#include "storage/storage.hpp" -#include "vote/vote.hpp" -#include "vote_manager/vote_manager.hpp" - -namespace taraxa::network::tarcap::v1 { - -void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr &peer, PbftPeriod from_period, - size_t blocks_to_transfer, bool pbft_chain_synced) { - const auto &peer_id = peer->getId(); - LOG(log_tr_) << "sendPbftBlocks: peer want to sync from pbft chain height " << from_period << ", will send at most " - << blocks_to_transfer << " pbft blocks to " << peer_id; - - // Transform period data rlp from v2 to v1 - auto transformPeriodDataRlpToV1 = [](const dev::bytes &period_data_v2) -> dev::bytes { - // Create PeriodData old(v1) rlp format - PeriodData period_data(period_data_v2); - - dev::RLPStream period_data_rlp(PeriodData::kRlpItemCount); - period_data_rlp.appendRaw(period_data.pbft_blk->rlp(true)); - period_data_rlp.appendList(period_data.previous_block_cert_votes.size()); - for (auto const &v : period_data.previous_block_cert_votes) { - period_data_rlp.appendRaw(v->rlp(true)); - } - period_data_rlp.appendList(period_data.dag_blocks.size()); - for (auto const 
&b : period_data.dag_blocks) { - period_data_rlp.appendRaw(b.rlp(true)); - } - period_data_rlp.appendList(period_data.transactions.size()); - for (auto const &t : period_data.transactions) { - period_data_rlp.appendRaw(t->rlp()); - } - - return period_data_rlp.invalidate(); - }; - - for (auto block_period = from_period; block_period < from_period + blocks_to_transfer; block_period++) { - bool last_block = (block_period == from_period + blocks_to_transfer - 1); - auto data = db_->getPeriodDataRaw(block_period); - - if (data.size() == 0) { - // This can happen when switching from light node to full node setting - LOG(log_er_) << "DB corrupted. Cannot find period " << block_period << " PBFT block in db"; - return; - } - - dev::RLPStream s; - if (pbft_chain_synced && last_block) { - s.appendList(3); - s << last_block; - s.appendRaw(transformPeriodDataRlpToV1(data)); - - // Latest finalized block cert votes are saved in db as reward votes for new blocks - const auto votes = vote_mgr_->getRewardVotes(); - s.appendList(votes.size()); - for (const auto &vote : votes) { - s.appendRaw(vote->rlp(true)); - } - } else { - s.appendList(2); - s << last_block; - s.appendRaw(transformPeriodDataRlpToV1(data)); - } - - LOG(log_dg_) << "Sending PbftSyncPacket period " << block_period << " to " << peer_id; - sealAndSend(peer_id, SubprotocolPacketType::PbftSyncPacket, std::move(s)); - if (pbft_chain_synced && last_block) { - peer->syncing_ = false; - } - } -} - -} // namespace taraxa::network::tarcap::v1 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v1/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v1/pbft_sync_packet_handler.cpp deleted file mode 100644 index 80bac3e977..0000000000 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v1/pbft_sync_packet_handler.cpp +++ /dev/null @@ -1,41 +0,0 @@ -#include "network/tarcap/packets_handlers/v1/pbft_sync_packet_handler.hpp" - -#include 
"network/tarcap/shared_states/pbft_syncing_state.hpp" -#include "pbft/pbft_chain.hpp" -#include "pbft/pbft_manager.hpp" -#include "transaction/transaction_manager.hpp" -#include "vote/vote.hpp" - -namespace taraxa::network::tarcap::v1 { - -PeriodData PbftSyncPacketHandler::decodePeriodData(const dev::RLP& period_data_rlp) const { - auto it = period_data_rlp.begin(); - - PeriodData period_data; - period_data.pbft_blk = std::make_shared(*it++); - for (auto const vote_rlp : *it++) { - period_data.previous_block_cert_votes.emplace_back(std::make_shared(vote_rlp)); - } - for (auto const dag_block_rlp : *it++) { - period_data.dag_blocks.emplace_back(dag_block_rlp); - } - for (auto const trx_rlp : *it) { - period_data.transactions.emplace_back(std::make_shared(trx_rlp)); - } - - return period_data; -} - -std::vector> PbftSyncPacketHandler::decodeVotesBundle(const dev::RLP& votes_bundle_rlp) const { - std::vector> votes; - const auto cert_votes_count = votes_bundle_rlp.itemCount(); - votes.reserve(cert_votes_count); - - for (size_t i = 0; i < cert_votes_count; i++) { - votes.emplace_back(std::make_shared(votes_bundle_rlp[i].data().toBytes())); - } - - return votes; -} - -} // namespace taraxa::network::tarcap::v1 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v1/votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v1/votes_bundle_packet_handler.cpp deleted file mode 100644 index 99a5f8dc70..0000000000 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v1/votes_bundle_packet_handler.cpp +++ /dev/null @@ -1,156 +0,0 @@ -#include "network/tarcap/packets_handlers/v1/votes_bundle_packet_handler.hpp" - -#include "pbft/pbft_manager.hpp" -#include "vote_manager/vote_manager.hpp" - -namespace taraxa::network::tarcap::v1 { - -void VotesBundlePacketHandler::validatePacketRlpFormat( - [[maybe_unused]] const threadpool::PacketData &packet_data) const { - auto items = packet_data.rlp_.itemCount(); - if (items == 0 || 
items > kMaxVotesInBundleRlp) { - throw InvalidRlpItemsCountException(packet_data.type_str_, items, kMaxVotesInBundleRlp); - } -} - -void VotesBundlePacketHandler::process(const threadpool::PacketData &packet_data, - const std::shared_ptr &peer) { - const auto reference_vote = std::make_shared(packet_data.rlp_[0]); - - const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); - const auto votes_bundle_pbft_period = reference_vote->getPeriod(); - const auto votes_bundle_pbft_round = reference_vote->getRound(); - const auto votes_bundle_votes_type = reference_vote->getType(); - const auto votes_bundle_voted_block = reference_vote->getBlockHash(); - - // Votes sync bundles are allowed to cotain only votes bundles of the same type, period, round and step so if first - // vote is irrelevant, all of them are - if (!isPbftRelevantVote(reference_vote)) { - LOG(log_wr_) << "Drop votes sync bundle as it is irrelevant for current pbft state. Votes (period, round, step) = (" - << votes_bundle_pbft_period << ", " << votes_bundle_pbft_round << ", " << reference_vote->getStep() - << "). Current PBFT (period, round, step) = (" << current_pbft_period << ", " << current_pbft_round - << ", " << pbft_mgr_->getPbftStep() << ")"; - return; - } - - // VotesBundlePacket does not support propose votes - if (votes_bundle_votes_type == PbftVoteTypes::propose_vote) { - LOG(log_er_) << "Dropping votes sync packet due to received \"propose_votes\" votes from " - << packet_data.from_node_id_ << ". 
The peer may be a malicious player, will be disconnected"; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); - return; - } - - std::vector> votes; - blk_hash_t next_votes_bundle_voted_block = kNullBlockHash; - - const auto next_votes_count = packet_data.rlp_.itemCount(); - for (size_t i = 0; i < next_votes_count; i++) { - auto vote = std::make_shared(packet_data.rlp_[i]); - peer->markVoteAsKnown(vote->getHash()); - - // Do not process vote that has already been validated - if (vote_mgr_->voteAlreadyValidated(vote->getHash())) { - LOG(log_dg_) << "Received vote " << vote->getHash() << " has already been validated"; - continue; - } - - // Next votes bundle can contain votes for kNullBlockHash as well as some specific block hash - if (vote->getType() == PbftVoteTypes::next_vote) { - if (next_votes_bundle_voted_block == kNullBlockHash && vote->getBlockHash() != kNullBlockHash) { - // initialize voted value with first block hash not equal to kNullBlockHash - next_votes_bundle_voted_block = vote->getBlockHash(); - } - - if (vote->getBlockHash() != kNullBlockHash && vote->getBlockHash() != next_votes_bundle_voted_block) { - // we see different voted value, so bundle is invalid - LOG(log_er_) << "Received next votes bundle with unmatched voted values(" << next_votes_bundle_voted_block - << ", " << vote->getBlockHash() << ") from " << packet_data.from_node_id_ - << ". The peer may be a malicious player, will be disconnected"; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); - return; - } - } else { - // Other votes bundles can contain votes only for 1 specific block hash - if (vote->getBlockHash() != votes_bundle_voted_block) { - // we see different voted value, so bundle is invalid - LOG(log_er_) << "Received votes bundle with unmatched voted values(" << votes_bundle_voted_block << ", " - << vote->getBlockHash() << ") from " << packet_data.from_node_id_ - << ". 
The peer may be a malicious player, will be disconnected"; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); - return; - } - } - - if (vote->getType() != votes_bundle_votes_type) { - LOG(log_er_) << "Received votes bundle with unmatched types from " << packet_data.from_node_id_ - << ". The peer may be a malicious player, will be disconnected"; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); - return; - } - - if (vote->getPeriod() != votes_bundle_pbft_period) { - LOG(log_er_) << "Received votes bundle with unmatched periods from " << packet_data.from_node_id_ - << ". The peer may be a malicious player, will be disconnected"; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); - return; - } - - if (vote->getRound() != votes_bundle_pbft_round) { - LOG(log_er_) << "Received votes bundle with unmatched rounds from " << packet_data.from_node_id_ - << ". The peer may be a malicious player, will be disconnected"; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); - return; - } - - LOG(log_dg_) << "Received sync vote " << vote->getHash().abridged(); - - // Process processStandardVote is called with false in case of next votes bundle -> does not check max boundaries - // for round and step to actually being able to sync the current round in case network is stalled - bool check_max_round_step = true; - if (votes_bundle_votes_type == PbftVoteTypes::cert_vote || votes_bundle_votes_type == PbftVoteTypes::next_vote) { - check_max_round_step = false; - } - - if (!processVote(vote, nullptr, peer, check_max_round_step)) { - continue; - } - - votes.push_back(std::move(vote)); - } - - LOG(log_nf_) << "Received " << next_votes_count << " (processed " << votes.size() << " ) sync votes from peer " - << packet_data.from_node_id_ << " node current round " << current_pbft_round << ", peer pbft round " - << votes_bundle_pbft_round; - - onNewPbftVotesBundle(votes, false, packet_data.from_node_id_); -} - -void 
VotesBundlePacketHandler::sendPbftVotesBundle(const std::shared_ptr &peer, - std::vector> &&votes) { - if (votes.empty()) { - return; - } - - size_t index = 0; - while (index < votes.size()) { - const size_t count = std::min(static_cast(kMaxVotesInBundleRlp), votes.size() - index); - dev::RLPStream s(count); - for (auto i = index; i < index + count; i++) { - const auto &vote = votes[i]; - s.appendRaw(vote->rlp(true, false)); - LOG(log_dg_) << "Send vote " << vote->getHash() << " to peer " << peer->getId(); - } - - if (sealAndSend(peer->getId(), SubprotocolPacketType::VotesBundlePacket, std::move(s))) { - LOG(log_dg_) << count << " PBFT votes to were sent to " << peer->getId(); - for (auto i = index; i < index + count; i++) { - peer->markVoteAsKnown(votes[i]->getHash()); - } - } - - index += count; - } -} - -} // namespace taraxa::network::tarcap::v1 From c5e5427594db2db2ab9646ef6065aed67514188e Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Wed, 1 Nov 2023 14:01:28 -0700 Subject: [PATCH 36/72] adjust aspen hf config, fix tests and update evm --- .../config_jsons/default/default_genesis.json | 4 ++++ .../config_jsons/devnet/devnet_genesis.json | 4 ++++ .../config_jsons/mainnet/mainnet_genesis.json | 4 ++++ .../config_jsons/testnet/testnet_genesis.json | 6 ++++- libraries/config/include/config/hardfork.hpp | 12 ++++++++++ libraries/config/src/hardfork.cpp | 23 +++++++++++++++---- submodules/taraxa-evm | 2 +- tests/final_chain_test.cpp | 12 ++++++---- tests/rewards_stats_test.cpp | 3 ++- tests/test_util/src/test_util.cpp | 21 ++++++++++++++++- 10 files changed, 78 insertions(+), 13 deletions(-) diff --git a/libraries/cli/include/cli/config_jsons/default/default_genesis.json b/libraries/cli/include/cli/config_jsons/default/default_genesis.json index cad60f15cc..bad7773eda 100644 --- a/libraries/cli/include/cli/config_jsons/default/default_genesis.json +++ b/libraries/cli/include/cli/config_jsons/default/default_genesis.json @@ -105,6 +105,10 @@ "magnolia_hf" : { 
"block_num" : 0, "jail_time": 163459 + }, + "aspen_hf" : { + "block_num" : 0, + "mas_supply": "0x26C62AD77DC602DAE0000000" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json index e8a9fe815c..f5ce89962e 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json @@ -271,6 +271,10 @@ "magnolia_hf": { "block_num": 0, "jail_time": 1000 + }, + "aspen_hf" : { + "block_num" : 0, + "mas_supply": "0x26C62AD77DC602DAE0000000" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json index 747257b6b1..63769f1607 100644 --- a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json @@ -1640,6 +1640,10 @@ "magnolia_hf" : { "block_num" : 5730000, "jail_time": 163459 + }, + "aspen_hf" : { + "block_num" : 0, + "mas_supply": "0x26C62AD77DC602DAE0000000" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index 79c2f1911a..ddbaa97574 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -140,6 +140,10 @@ "magnolia_hf": { "block_num": 297000, "jail_time": 163459 + }, + "aspen_hf" : { + "block_num" : 0, + "mas_supply": "0x26C62AD77DC602DAE0000000" } } -} \ No newline at end of file +} diff --git a/libraries/config/include/config/hardfork.hpp b/libraries/config/include/config/hardfork.hpp index 7246e9932c..6cd63bae5e 100644 --- a/libraries/config/include/config/hardfork.hpp +++ b/libraries/config/include/config/hardfork.hpp @@ 
-23,6 +23,15 @@ struct MagnoliaHardfork { Json::Value enc_json(const MagnoliaHardfork& obj); void dec_json(const Json::Value& json, MagnoliaHardfork& obj); +struct AspenHardfork { + uint64_t block_num{0}; + taraxa::uint256_t max_supply{"0x26C62AD77DC602DAE0000000"}; // 12 Billion + + HAS_RLP_FIELDS +}; +Json::Value enc_json(const AspenHardfork& obj); +void dec_json(const Json::Value& json, AspenHardfork& obj); + struct HardforksConfig { // disable it by default (set to max uint64) uint64_t fix_redelegate_block_num = -1; @@ -50,6 +59,9 @@ struct HardforksConfig { // participate in consensus MagnoliaHardfork magnolia_hf; + // Aspen hardfork implements new yield curve + AspenHardfork aspen_hf; + HAS_RLP_FIELDS }; diff --git a/libraries/config/src/hardfork.cpp b/libraries/config/src/hardfork.cpp index 6b4f7d5cee..fcb3e86529 100644 --- a/libraries/config/src/hardfork.cpp +++ b/libraries/config/src/hardfork.cpp @@ -29,6 +29,19 @@ void dec_json(const Json::Value& json, MagnoliaHardfork& obj) { } RLP_FIELDS_DEFINE(MagnoliaHardfork, block_num, jail_time) +Json::Value enc_json(const AspenHardfork& obj) { + Json::Value json(Json::objectValue); + json["block_num"] = dev::toJS(obj.block_num); + json["max_supply"] = dev::toJS(obj.max_supply); + return json; +} + +void dec_json(const Json::Value& json, AspenHardfork& obj) { + obj.block_num = dev::getUInt(json["block_num"]); + obj.max_supply = dev::jsToU256(json["max_supply"].asString()); +} +RLP_FIELDS_DEFINE(AspenHardfork, block_num, max_supply) + Json::Value enc_json(const HardforksConfig& obj) { Json::Value json(Json::objectValue); json["fix_redelegate_block_num"] = dev::toJS(obj.fix_redelegate_block_num); @@ -44,6 +57,7 @@ Json::Value enc_json(const HardforksConfig& obj) { } json["magnolia_hf"] = enc_json(obj.magnolia_hf); + json["aspen_hf"] = enc_json(obj.aspen_hf); return json; } @@ -65,9 +79,10 @@ void dec_json(const Json::Value& json, HardforksConfig& obj) { obj.rewards_distribution_frequency[dev::getUInt(itr.key())] = 
dev::getUInt(*itr); } } - if (const auto& e = json["magnolia_hf"]) { - dec_json(e, obj.magnolia_hf); - } + + dec_json(json["magnolia_hf"], obj.magnolia_hf); + dec_json(json["aspen_hf"], obj.aspen_hf); } -RLP_FIELDS_DEFINE(HardforksConfig, fix_redelegate_block_num, redelegations, rewards_distribution_frequency, magnolia_hf) +RLP_FIELDS_DEFINE(HardforksConfig, fix_redelegate_block_num, redelegations, rewards_distribution_frequency, magnolia_hf, + aspen_hf) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index c067e03759..c149a088c5 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit c067e03759ca18fbe98c75d5857cfcd840d46e6a +Subproject commit c149a088c560376d159e1c6b76be75112076e51a diff --git a/tests/final_chain_test.cpp b/tests/final_chain_test.cpp index ed060a58a5..4f7685b1d1 100644 --- a/tests/final_chain_test.cpp +++ b/tests/final_chain_test.cpp @@ -191,9 +191,10 @@ struct FinalChainTest : WithDataDir { TEST_F(FinalChainTest, initial_balances) { cfg.genesis.state.initial_balances = {}; - cfg.genesis.state.initial_balances[addr_t::random()] = 1; - cfg.genesis.state.initial_balances[addr_t::random()] = 1000; - cfg.genesis.state.initial_balances[addr_t::random()] = 100000; + cfg.genesis.state.initial_balances[addr_t::random()] = taraxa::uint256_t("0x16345785D8A0000"); // 1 + cfg.genesis.state.initial_balances[addr_t::random()] = taraxa::uint256_t("0x56BC75E2D63100000"); // 1k + cfg.genesis.state.initial_balances[addr_t::random()] = + taraxa::uint256_t("0x204FCE5E3E25026110000000"); // 10 Billion init(); } @@ -202,7 +203,7 @@ TEST_F(FinalChainTest, contract) { const auto& addr = sender_keys.address(); const auto& sk = sender_keys.secret(); cfg.genesis.state.initial_balances = {}; - cfg.genesis.state.initial_balances[addr] = 100000; + cfg.genesis.state.initial_balances[addr] = taraxa::uint256_t("0x204FCE5E3E25026110000000"); // 10 Billion init(); auto nonce = 0; auto trx = std::make_shared(nonce++, 0, 0, 1000000, 
dev::fromHex(samples::greeter_contract_code), sk); @@ -253,7 +254,8 @@ TEST_F(FinalChainTest, coin_transfers) { keys.reserve(NUM_ACCS); for (size_t i = 0; i < NUM_ACCS; ++i) { const auto& k = keys.emplace_back(dev::KeyPair::create()); - cfg.genesis.state.initial_balances[k.address()] = std::numeric_limits::max() / NUM_ACCS; + cfg.genesis.state.initial_balances[k.address()] = + taraxa::uint256_t("0x204FCE5E3E25026110000000") /* 10 Billion */ / NUM_ACCS; } init(); diff --git a/tests/rewards_stats_test.cpp b/tests/rewards_stats_test.cpp index 53daec009f..95edd73e0e 100644 --- a/tests/rewards_stats_test.cpp +++ b/tests/rewards_stats_test.cpp @@ -20,7 +20,8 @@ struct RewardsStatsTest : NodesTest {}; class TestableRewardsStats : public rewards::Stats { public: TestableRewardsStats(const HardforksConfig::RewardsDistributionMap& rdm, std::shared_ptr db) - : rewards::Stats(100, HardforksConfig{0, {}, rdm, MagnoliaHardfork{0, 0}}, db, [](auto) { return 100; }) {} + : rewards::Stats(100, HardforksConfig{0, {}, rdm, MagnoliaHardfork{0, 0}, AspenHardfork{0, 0}}, db, + [](auto) { return 100; }) {} auto getStats() { return blocks_stats_; } }; diff --git a/tests/test_util/src/test_util.cpp b/tests/test_util/src/test_util.cpp index b1ca6064af..536edafc22 100644 --- a/tests/test_util/src/test_util.cpp +++ b/tests/test_util/src/test_util.cpp @@ -260,10 +260,29 @@ std::vector NodesTest::make_node_cfgs(size_t total_count taraxa::state_api::BalanceMap initial_balances; std::vector initial_validators; + // Calculate initial balance based on AspenHf.MaxSupply so "Yield = (MaxSupply - Genesis Balances Sum) / Genesis + // Balances Sum = 20% + + // Yield [%] = 100 * (max_supply - total_supply) / total_supply + // Yield * total_supply = 100 * max_supply - 100 * total_supply + // (Yield * total_supply) + (100 * total_supply) = 100 * max_supply + // total_supply * (100 + yield) = 100 * max_supply + // total_supply = 100 * max_supply / (100 + yield) + // total_supply = num_of_nodes * 
init_balance + // num_of_nodes * init_balance = 100 * max_supply / (100 + yield) + // init_balance = 100 * max_supply / ((100 + yield) * num_of_nodes) + + const taraxa::uint256_t yield{20}; // [%] + const taraxa::uint256_t hundred{100}; + const taraxa::uint256_t num_of_nodes{total_count}; + const taraxa::uint256_t max_supply = ret_configs.back().genesis.state.hardforks.aspen_hf.max_supply; + + const taraxa::uint256_t init_balance = (hundred * max_supply) / ((hundred + yield) * num_of_nodes); + for (size_t idx = 0; idx < total_count; idx++) { const auto& cfg = ret_configs[idx]; const auto& node_addr = dev::toAddress(cfg.node_secret); - initial_balances[node_addr] = 9007199254740991; + initial_balances[node_addr] = init_balance; if (idx >= validators_count) { continue; From 9f03e3ac9426a603f2185f39e258535eff050c05 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Thu, 2 Nov 2023 14:30:55 -0700 Subject: [PATCH 37/72] added rpc calls taraxa_yield and taraxa_totalSupply --- doc/RPC.md | 55 +++++++++++++++++++ .../include/final_chain/final_chain.hpp | 13 +++++ .../include/final_chain/state_api.hpp | 2 + .../consensus/src/final_chain/final_chain.cpp | 4 ++ .../consensus/src/final_chain/state_api.cpp | 15 +++++ libraries/core_libs/network/rpc/Taraxa.cpp | 28 ++++++++++ libraries/core_libs/network/rpc/Taraxa.h | 2 + .../core_libs/network/rpc/Taraxa.jsonrpc.json | 12 ++++ .../core_libs/network/rpc/TaraxaClient.h | 18 ++++++ libraries/core_libs/network/rpc/TaraxaFace.h | 16 ++++++ 10 files changed, 165 insertions(+) diff --git a/doc/RPC.md b/doc/RPC.md index 72f3d5f671..2e8f376516 100644 --- a/doc/RPC.md +++ b/doc/RPC.md @@ -511,6 +511,61 @@ curl -X POST --data '{"jsonrpc":"2.0","method":"taraxa_getChainStats","params":[ } ``` +### taraxa_yield + +Returns yield for specified period + +#### Parameters + +`QUANTITY` - period + +#### Returns + +`String` - yield + +To transform returned yield to percents -> ```decimal(yield) / 1e4```\ +To transform returned yield to fraction -> 
```decimal(yield) / 1e6``` + +#### Example + +```json +// Request +curl -X POST --data '{"jsonrpc":"2.0","method":"taraxa_yield","params":["0x1D533B"],"id":1}' + +// Result +{ + "id": 1, + "jsonrpc": "2.0", + "result": "0x30D40" +} +``` + +### taraxa_totalSupply + +Returns total supply for specified period + +#### Parameters + +`QUANTITY` - period + +#### Returns + +`String` - total supply + +#### Example + +```json +// Request +curl -X POST --data '{"jsonrpc":"2.0","method":"taraxa_totalSupply","params":["0x1D533B"],"id":1}' + +// Result +{ + "id": 1, + "jsonrpc": "2.0", + "result": "0x204FCE5E3E25026110000000" +} +``` + ## Test API ### get_sortition_change diff --git a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp index 70e95043cd..30b91bc585 100644 --- a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp +++ b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp @@ -229,6 +229,19 @@ class FinalChain { virtual void wait_for_finalized() = 0; virtual std::vector dpos_validators_total_stakes(EthBlockNumber blk_num) const = 0; + + /** + * @param blk_num + * @return yield + */ + virtual uint64_t dpos_yield(EthBlockNumber blk_num) const = 0; + + /** + * @param blk_num + * @return total supply + */ + virtual u256 dpos_total_supply(EthBlockNumber blk_num) const = 0; + // TODO move out of here: std::pair getBalance(addr_t const& addr) const { diff --git a/libraries/core_libs/consensus/include/final_chain/state_api.hpp b/libraries/core_libs/consensus/include/final_chain/state_api.hpp index 17a4d148cb..a8122436b4 100644 --- a/libraries/core_libs/consensus/include/final_chain/state_api.hpp +++ b/libraries/core_libs/consensus/include/final_chain/state_api.hpp @@ -59,6 +59,8 @@ class StateAPI { u256 get_staking_balance(EthBlockNumber blk_num, const addr_t& addr) const; vrf_wrapper::vrf_pk_t dpos_get_vrf_key(EthBlockNumber blk_num, const addr_t& addr) const; 
std::vector dpos_validators_total_stakes(EthBlockNumber blk_num) const; + uint64_t dpos_yield(EthBlockNumber blk_num) const; + u256 dpos_total_supply(EthBlockNumber blk_num) const; }; /** @} */ diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index c39ef5a6fb..4c339f6c5b 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -453,6 +453,10 @@ class FinalChainImpl final : public FinalChain { finalized_cv_.wait_for(lck, std::chrono::milliseconds(10)); } + uint64_t dpos_yield(EthBlockNumber blk_num) const override { return state_api_.dpos_yield(blk_num); } + + u256 dpos_total_supply(EthBlockNumber blk_num) const override { return state_api_.dpos_total_supply(blk_num); } + private: std::shared_ptr get_transaction_hashes(std::optional n = {}) const { const auto& trxs = db_->getPeriodTransactions(last_if_absent(n)); diff --git a/libraries/core_libs/consensus/src/final_chain/state_api.cpp b/libraries/core_libs/consensus/src/final_chain/state_api.cpp index d9d68d2048..482084c1d2 100644 --- a/libraries/core_libs/consensus/src/final_chain/state_api.cpp +++ b/libraries/core_libs/consensus/src/final_chain/state_api.cpp @@ -258,4 +258,19 @@ std::vector StateAPI::dpos_validators_total_stakes(EthBlockNumbe return ret; } +uint64_t StateAPI::dpos_yield(EthBlockNumber blk_num) const { + ErrorHandler err_h; + auto ret = taraxa_evm_state_api_dpos_yield(this_c_, blk_num, err_h.cgo_part_); + err_h.check(); + return ret; +} + +u256 StateAPI::dpos_total_supply(EthBlockNumber blk_num) const { + u256 ret; + ErrorHandler err_h; + taraxa_evm_state_api_dpos_total_supply(this_c_, blk_num, decoder_cb_c(ret), err_h.cgo_part_); + err_h.check(); + return ret; +} + } // namespace taraxa::state_api diff --git a/libraries/core_libs/network/rpc/Taraxa.cpp b/libraries/core_libs/network/rpc/Taraxa.cpp index 
59bb124b5e..00b06faac4 100644 --- a/libraries/core_libs/network/rpc/Taraxa.cpp +++ b/libraries/core_libs/network/rpc/Taraxa.cpp @@ -153,4 +153,32 @@ Json::Value Taraxa::taraxa_getChainStats() { return res; } +std::string Taraxa::taraxa_yield(const std::string& _period) { + try { + auto node = full_node_.lock(); + if (!node) { + BOOST_THROW_EXCEPTION(JsonRpcException(Errors::ERROR_RPC_INTERNAL_ERROR)); + } + + auto period = dev::jsToInt(_period); + return toJS(node->getFinalChain()->dpos_yield(period)); + } catch (...) { + BOOST_THROW_EXCEPTION(JsonRpcException(Errors::ERROR_RPC_INVALID_PARAMS)); + } +} + +std::string Taraxa::taraxa_totalSupply(const std::string& _period) { + try { + auto node = full_node_.lock(); + if (!node) { + BOOST_THROW_EXCEPTION(JsonRpcException(Errors::ERROR_RPC_INTERNAL_ERROR)); + } + + auto period = dev::jsToInt(_period); + return toJS(node->getFinalChain()->dpos_total_supply(period)); + } catch (...) { + BOOST_THROW_EXCEPTION(JsonRpcException(Errors::ERROR_RPC_INVALID_PARAMS)); + } +} + } // namespace taraxa::net \ No newline at end of file diff --git a/libraries/core_libs/network/rpc/Taraxa.h b/libraries/core_libs/network/rpc/Taraxa.h index 39b32cca34..7412d266c2 100644 --- a/libraries/core_libs/network/rpc/Taraxa.h +++ b/libraries/core_libs/network/rpc/Taraxa.h @@ -29,6 +29,8 @@ class Taraxa : public TaraxaFace { virtual std::string taraxa_pbftBlockHashByPeriod(const std::string& _period) override; virtual Json::Value taraxa_getConfig() override; virtual Json::Value taraxa_getChainStats() override; + virtual std::string taraxa_yield(const std::string& _period) override; + virtual std::string taraxa_totalSupply(const std::string& _period) override; protected: std::weak_ptr full_node_; diff --git a/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json b/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json index 15e5b2135c..eac23624fe 100644 --- a/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json +++ 
b/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json @@ -64,6 +64,18 @@ "params": [""], "order": [], "returns": "" + }, + { + "name": "taraxa_yield", + "params": [""], + "order": [], + "returns": "" + }, + { + "name": "taraxa_totalSupply", + "params": [""], + "order": [], + "returns": "" } ] diff --git a/libraries/core_libs/network/rpc/TaraxaClient.h b/libraries/core_libs/network/rpc/TaraxaClient.h index a8a3c105a1..341e4a576e 100644 --- a/libraries/core_libs/network/rpc/TaraxaClient.h +++ b/libraries/core_libs/network/rpc/TaraxaClient.h @@ -106,6 +106,24 @@ class TaraxaClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } + std::string taraxa_yield(const std::string& param1) throw(jsonrpc::JsonRpcException) { + Json::Value p; + p.append(param1); + Json::Value result = this->CallMethod("taraxa_yield", p); + if (result.isString()) + return result.asString(); + else + throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); + } + std::string taraxa_totalSupply(const std::string& param1) throw(jsonrpc::JsonRpcException) { + Json::Value p; + p.append(param1); + Json::Value result = this->CallMethod("taraxa_totalSupply", p); + if (result.isString()) + return result.asString(); + else + throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); + } }; } // namespace net diff --git a/libraries/core_libs/network/rpc/TaraxaFace.h b/libraries/core_libs/network/rpc/TaraxaFace.h index 3f8a19c448..ffb31ed6a6 100644 --- a/libraries/core_libs/network/rpc/TaraxaFace.h +++ b/libraries/core_libs/network/rpc/TaraxaFace.h @@ -44,6 +44,12 @@ class TaraxaFace : public ServerInterface { this->bindAndAddMethod(jsonrpc::Procedure("taraxa_pbftBlockHashByPeriod", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", jsonrpc::JSON_STRING, NULL), 
&taraxa::net::TaraxaFace::taraxa_pbftBlockHashByPeriodI); + this->bindAndAddMethod( + jsonrpc::Procedure("taraxa_yield", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", NULL), + &taraxa::net::TaraxaFace::taraxa_yieldI); + this->bindAndAddMethod( + jsonrpc::Procedure("taraxa_totalSupply", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", NULL), + &taraxa::net::TaraxaFace::taraxa_totalSupplyI); } inline virtual void taraxa_protocolVersionI(const Json::Value &request, Json::Value &response) { @@ -82,6 +88,14 @@ class TaraxaFace : public ServerInterface { inline virtual void taraxa_pbftBlockHashByPeriodI(const Json::Value &request, Json::Value &response) { response = this->taraxa_pbftBlockHashByPeriod(request[0u].asString()); } + inline virtual void taraxa_yieldI(const Json::Value &request, Json::Value &response) { + (void)request; + response = this->taraxa_yield(request[0u].asString()); + } + inline virtual void taraxa_totalSupplyI(const Json::Value &request, Json::Value &response) { + (void)request; + response = this->taraxa_totalSupply(request[0u].asString()); + } virtual std::string taraxa_protocolVersion() = 0; virtual Json::Value taraxa_getVersion() = 0; @@ -93,6 +107,8 @@ class TaraxaFace : public ServerInterface { virtual Json::Value taraxa_getConfig() = 0; virtual Json::Value taraxa_getChainStats() = 0; virtual std::string taraxa_pbftBlockHashByPeriod(const std::string ¶m1) = 0; + virtual std::string taraxa_yield(const std::string ¶m1) = 0; + virtual std::string taraxa_totalSupply(const std::string ¶m1) = 0; }; } // namespace net From 61271ff39f509716aa2fd5cd3011f2ab315f5164 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Mon, 6 Nov 2023 12:22:21 -0800 Subject: [PATCH 38/72] fix PR comments --- .../cli/include/cli/config_jsons/default/default_genesis.json | 2 +- .../cli/include/cli/config_jsons/devnet/devnet_genesis.json | 4 ++-- .../cli/include/cli/config_jsons/mainnet/mainnet_genesis.json | 4 ++-- 
.../cli/include/cli/config_jsons/testnet/testnet_genesis.json | 4 ++-- libraries/core_libs/network/rpc/Taraxa.cpp | 4 ++++ 5 files changed, 11 insertions(+), 7 deletions(-) diff --git a/libraries/cli/include/cli/config_jsons/default/default_genesis.json b/libraries/cli/include/cli/config_jsons/default/default_genesis.json index bad7773eda..ab0a5284b2 100644 --- a/libraries/cli/include/cli/config_jsons/default/default_genesis.json +++ b/libraries/cli/include/cli/config_jsons/default/default_genesis.json @@ -108,7 +108,7 @@ }, "aspen_hf" : { "block_num" : 0, - "mas_supply": "0x26C62AD77DC602DAE0000000" + "max_supply": "0x26C62AD77DC602DAE0000000" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json index f5ce89962e..127c5de63a 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json @@ -273,8 +273,8 @@ "jail_time": 1000 }, "aspen_hf" : { - "block_num" : 0, - "mas_supply": "0x26C62AD77DC602DAE0000000" + "block_num" : -1, + "max_supply": "0x26C62AD77DC602DAE0000000" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json index 63769f1607..4335081045 100644 --- a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json @@ -1642,8 +1642,8 @@ "jail_time": 163459 }, "aspen_hf" : { - "block_num" : 0, - "mas_supply": "0x26C62AD77DC602DAE0000000" + "block_num" : -1, + "max_supply": "0x26C62AD77DC602DAE0000000" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index ddbaa97574..cc40e90653 100644 --- 
a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -142,8 +142,8 @@ "jail_time": 163459 }, "aspen_hf" : { - "block_num" : 0, - "mas_supply": "0x26C62AD77DC602DAE0000000" + "block_num" : -1, + "max_supply": "0x26C62AD77DC602DAE0000000" } } } diff --git a/libraries/core_libs/network/rpc/Taraxa.cpp b/libraries/core_libs/network/rpc/Taraxa.cpp index 00b06faac4..d6341d14b5 100644 --- a/libraries/core_libs/network/rpc/Taraxa.cpp +++ b/libraries/core_libs/network/rpc/Taraxa.cpp @@ -160,9 +160,12 @@ std::string Taraxa::taraxa_yield(const std::string& _period) { BOOST_THROW_EXCEPTION(JsonRpcException(Errors::ERROR_RPC_INTERNAL_ERROR)); } + std::cout << "taraxa_yield period\n"; auto period = dev::jsToInt(_period); + std::cout << "taraxa_yield toJS\n"; return toJS(node->getFinalChain()->dpos_yield(period)); } catch (...) { + std::cout << "taraxa_yield exception\n"; BOOST_THROW_EXCEPTION(JsonRpcException(Errors::ERROR_RPC_INVALID_PARAMS)); } } @@ -177,6 +180,7 @@ std::string Taraxa::taraxa_totalSupply(const std::string& _period) { auto period = dev::jsToInt(_period); return toJS(node->getFinalChain()->dpos_total_supply(period)); } catch (...) 
{ + std::cout << "taraxa_totalSupply exception\n"; BOOST_THROW_EXCEPTION(JsonRpcException(Errors::ERROR_RPC_INVALID_PARAMS)); } } From 29e12618faa6c1236d980e658467d2d857efa776 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Wed, 8 Nov 2023 12:38:54 -0800 Subject: [PATCH 39/72] fix yield & totalSupply rpc calls --- libraries/core_libs/network/rpc/TaraxaFace.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/libraries/core_libs/network/rpc/TaraxaFace.h b/libraries/core_libs/network/rpc/TaraxaFace.h index ffb31ed6a6..781afa9f4f 100644 --- a/libraries/core_libs/network/rpc/TaraxaFace.h +++ b/libraries/core_libs/network/rpc/TaraxaFace.h @@ -11,6 +11,8 @@ namespace taraxa { namespace net { class TaraxaFace : public ServerInterface { public: + static constexpr int JSON_ANY = 0; + TaraxaFace() { this->bindAndAddMethod( jsonrpc::Procedure("taraxa_protocolVersion", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL), @@ -45,11 +47,11 @@ class TaraxaFace : public ServerInterface { jsonrpc::JSON_STRING, "param1", jsonrpc::JSON_STRING, NULL), &taraxa::net::TaraxaFace::taraxa_pbftBlockHashByPeriodI); this->bindAndAddMethod( - jsonrpc::Procedure("taraxa_yield", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", NULL), + jsonrpc::Procedure("taraxa_yield", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", JSON_ANY, NULL), &taraxa::net::TaraxaFace::taraxa_yieldI); - this->bindAndAddMethod( - jsonrpc::Procedure("taraxa_totalSupply", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", NULL), - &taraxa::net::TaraxaFace::taraxa_totalSupplyI); + this->bindAndAddMethod(jsonrpc::Procedure("taraxa_totalSupply", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, + "param1", JSON_ANY, NULL), + &taraxa::net::TaraxaFace::taraxa_totalSupplyI); } inline virtual void taraxa_protocolVersionI(const Json::Value &request, Json::Value &response) { From d86eba4e1fe7833098e9e4e0a1b375ee121bb224 Mon Sep 17 00:00:00 2001 From: Jakub 
Fornadel Date: Wed, 8 Nov 2023 14:50:49 -0800 Subject: [PATCH 40/72] fix reading config block_num -1 value into uint64 variable --- libraries/config/include/config/hardfork.hpp | 2 +- libraries/config/src/hardfork.cpp | 7 ++++--- libraries/core_libs/network/rpc/Taraxa.cpp | 4 ---- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/libraries/config/include/config/hardfork.hpp b/libraries/config/include/config/hardfork.hpp index 6cd63bae5e..b1a1c38e34 100644 --- a/libraries/config/include/config/hardfork.hpp +++ b/libraries/config/include/config/hardfork.hpp @@ -24,7 +24,7 @@ Json::Value enc_json(const MagnoliaHardfork& obj); void dec_json(const Json::Value& json, MagnoliaHardfork& obj); struct AspenHardfork { - uint64_t block_num{0}; + uint64_t block_num = -1; taraxa::uint256_t max_supply{"0x26C62AD77DC602DAE0000000"}; // 12 Billion HAS_RLP_FIELDS diff --git a/libraries/config/src/hardfork.cpp b/libraries/config/src/hardfork.cpp index fcb3e86529..0833a14f7c 100644 --- a/libraries/config/src/hardfork.cpp +++ b/libraries/config/src/hardfork.cpp @@ -24,7 +24,7 @@ Json::Value enc_json(const MagnoliaHardfork& obj) { } void dec_json(const Json::Value& json, MagnoliaHardfork& obj) { - obj.block_num = dev::getUInt(json["block_num"]); + obj.block_num = json["block_num"].isUInt64() ? dev::getUInt(json["block_num"]) : uint64_t(-1); obj.jail_time = dev::getUInt(json["jail_time"]); } RLP_FIELDS_DEFINE(MagnoliaHardfork, block_num, jail_time) @@ -37,7 +37,7 @@ Json::Value enc_json(const AspenHardfork& obj) { } void dec_json(const Json::Value& json, AspenHardfork& obj) { - obj.block_num = dev::getUInt(json["block_num"]); + obj.block_num = json["block_num"].isUInt64() ?
dev::getUInt(json["block_num"]) : uint64_t(-1); obj.max_supply = dev::jsToU256(json["max_supply"].asString()); } RLP_FIELDS_DEFINE(AspenHardfork, block_num, max_supply) @@ -63,7 +63,8 @@ Json::Value enc_json(const HardforksConfig& obj) { } void dec_json(const Json::Value& json, HardforksConfig& obj) { - obj.fix_redelegate_block_num = dev::getUInt(json["fix_redelegate_block_num"]); + obj.fix_redelegate_block_num = + json["fix_redelegate_block_num"].isUInt64() ? dev::getUInt(json["fix_redelegate_block_num"]) : uint64_t(-1); const auto& redelegations_json = json["redelegations"]; obj.redelegations = std::vector(redelegations_json.size()); diff --git a/libraries/core_libs/network/rpc/Taraxa.cpp b/libraries/core_libs/network/rpc/Taraxa.cpp index d6341d14b5..00b06faac4 100644 --- a/libraries/core_libs/network/rpc/Taraxa.cpp +++ b/libraries/core_libs/network/rpc/Taraxa.cpp @@ -160,12 +160,9 @@ std::string Taraxa::taraxa_yield(const std::string& _period) { BOOST_THROW_EXCEPTION(JsonRpcException(Errors::ERROR_RPC_INTERNAL_ERROR)); } - std::cout << "taraxa_yield period\n"; auto period = dev::jsToInt(_period); - std::cout << "taraxa_yield toJS\n"; return toJS(node->getFinalChain()->dpos_yield(period)); } catch (...) { - std::cout << "taraxa_yield exception\n"; BOOST_THROW_EXCEPTION(JsonRpcException(Errors::ERROR_RPC_INVALID_PARAMS)); } } @@ -180,7 +177,6 @@ std::string Taraxa::taraxa_totalSupply(const std::string& _period) { auto period = dev::jsToInt(_period); return toJS(node->getFinalChain()->dpos_total_supply(period)); } catch (...) 
{ - std::cout << "taraxa_totalSupply exception\n"; BOOST_THROW_EXCEPTION(JsonRpcException(Errors::ERROR_RPC_INVALID_PARAMS)); } } From be5bd31be002970908c2b1be9c38a114d646fc2b Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Wed, 15 Nov 2023 16:55:39 -0800 Subject: [PATCH 41/72] change the way total supply is calculated due to light node limitations --- .../cli/include/cli/config_jsons/default/default_genesis.json | 3 ++- .../cli/include/cli/config_jsons/devnet/devnet_genesis.json | 3 ++- .../cli/include/cli/config_jsons/mainnet/mainnet_genesis.json | 3 ++- .../cli/include/cli/config_jsons/testnet/testnet_genesis.json | 3 ++- libraries/config/include/config/hardfork.hpp | 3 +++ libraries/config/src/hardfork.cpp | 4 +++- 6 files changed, 14 insertions(+), 5 deletions(-) diff --git a/libraries/cli/include/cli/config_jsons/default/default_genesis.json b/libraries/cli/include/cli/config_jsons/default/default_genesis.json index ab0a5284b2..fc767c26b0 100644 --- a/libraries/cli/include/cli/config_jsons/default/default_genesis.json +++ b/libraries/cli/include/cli/config_jsons/default/default_genesis.json @@ -108,7 +108,8 @@ }, "aspen_hf" : { "block_num" : 0, - "max_supply": "0x26C62AD77DC602DAE0000000" + "max_supply": "0x26C62AD77DC602DAE0000000", + "generated_rewards": "0x0" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json index 127c5de63a..96939cd8dd 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json @@ -274,7 +274,8 @@ }, "aspen_hf" : { "block_num" : -1, - "max_supply": "0x26C62AD77DC602DAE0000000" + "max_supply": "0x26C62AD77DC602DAE0000000", + "generated_rewards": "0x0" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json 
b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json index 4335081045..6f04c36ac5 100644 --- a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json @@ -1643,7 +1643,8 @@ }, "aspen_hf" : { "block_num" : -1, - "max_supply": "0x26C62AD77DC602DAE0000000" + "max_supply": "0x26C62AD77DC602DAE0000000", + "generated_rewards": "0x0" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index cc40e90653..1104d33c47 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -143,7 +143,8 @@ }, "aspen_hf" : { "block_num" : -1, - "max_supply": "0x26C62AD77DC602DAE0000000" + "max_supply": "0x26C62AD77DC602DAE0000000", + "generated_rewards": "0x0" } } } diff --git a/libraries/config/include/config/hardfork.hpp b/libraries/config/include/config/hardfork.hpp index b1a1c38e34..00b7ac73fa 100644 --- a/libraries/config/include/config/hardfork.hpp +++ b/libraries/config/include/config/hardfork.hpp @@ -26,6 +26,9 @@ void dec_json(const Json::Value& json, MagnoliaHardfork& obj); struct AspenHardfork { uint64_t block_num = -1; taraxa::uint256_t max_supply{"0x26C62AD77DC602DAE0000000"}; // 12 Billion + // total generated rewards from block 1 to block_num + // It is partially estimated for blocks between the aspen hf release block and actual aspen hf block_num + taraxa::uint256_t generated_rewards{0}; HAS_RLP_FIELDS }; diff --git a/libraries/config/src/hardfork.cpp b/libraries/config/src/hardfork.cpp index 0833a14f7c..4109f34d10 100644 --- a/libraries/config/src/hardfork.cpp +++ b/libraries/config/src/hardfork.cpp @@ -33,14 +33,16 @@ Json::Value enc_json(const AspenHardfork& obj) { Json::Value json(Json::objectValue); json["block_num"] = 
dev::toJS(obj.block_num); json["max_supply"] = dev::toJS(obj.max_supply); + json["generated_rewards"] = dev::toJS(obj.generated_rewards); return json; } void dec_json(const Json::Value& json, AspenHardfork& obj) { obj.block_num = json["block_num"].isUInt64() ? dev::getUInt(json["block_num"]) : uint64_t(-1); obj.max_supply = dev::jsToU256(json["max_supply"].asString()); + obj.generated_rewards = dev::jsToU256(json["generated_rewards"].asString()); } -RLP_FIELDS_DEFINE(AspenHardfork, block_num, max_supply) +RLP_FIELDS_DEFINE(AspenHardfork, block_num, max_supply, generated_rewards) Json::Value enc_json(const HardforksConfig& obj) { Json::Value json(Json::objectValue); From 1bda2981f6c18c51a3979a16f2545ee9715f8f04 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Thu, 16 Nov 2023 12:46:59 -0800 Subject: [PATCH 42/72] update evm --- submodules/libBLS | 1 + 1 file changed, 1 insertion(+) create mode 160000 submodules/libBLS diff --git a/submodules/libBLS b/submodules/libBLS new file mode 160000 index 0000000000..6fb4d5c5a1 --- /dev/null +++ b/submodules/libBLS @@ -0,0 +1 @@ +Subproject commit 6fb4d5c5a1af80bcca82b95fc30e01b65c7cf235 From e397153cbf415a2ba495df1c500f191ead332e8a Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Thu, 30 Nov 2023 15:37:31 -0800 Subject: [PATCH 43/72] update evm --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index c149a088c5..a28555848f 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit c149a088c560376d159e1c6b76be75112076e51a +Subproject commit a28555848f6437161f1f4d352adb7370c4d0f93e From 91c9bb0dfc37c5967a36fea87a2281945136a6c6 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Thu, 23 Nov 2023 18:36:08 -0800 Subject: [PATCH 44/72] divide aspen hardfork into 2 parts --- .../config_jsons/default/default_genesis.json | 11 +++++-- .../config_jsons/devnet/devnet_genesis.json | 11 +++++-- 
.../config_jsons/mainnet/mainnet_genesis.json | 11 +++++-- .../config_jsons/testnet/testnet_genesis.json | 11 +++++-- libraries/config/include/config/hardfork.hpp | 21 ++++++++++++- libraries/config/src/hardfork.cpp | 30 +++++++++++++++++-- submodules/taraxa-evm | 2 +- tests/test_util/src/test_util.cpp | 2 +- 8 files changed, 81 insertions(+), 18 deletions(-) diff --git a/libraries/cli/include/cli/config_jsons/default/default_genesis.json b/libraries/cli/include/cli/config_jsons/default/default_genesis.json index fc767c26b0..7853c54c72 100644 --- a/libraries/cli/include/cli/config_jsons/default/default_genesis.json +++ b/libraries/cli/include/cli/config_jsons/default/default_genesis.json @@ -107,9 +107,14 @@ "jail_time": 163459 }, "aspen_hf" : { - "block_num" : 0, - "max_supply": "0x26C62AD77DC602DAE0000000", - "generated_rewards": "0x0" + "part_one" : { + "block_num" : 0 + }, + "part_two" : { + "block_num" : 0, + "max_supply": "0x26C62AD77DC602DAE0000000", + "generated_rewards": "0x0" + } } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json index 96939cd8dd..9337716ca9 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json @@ -273,9 +273,14 @@ "jail_time": 1000 }, "aspen_hf" : { - "block_num" : -1, - "max_supply": "0x26C62AD77DC602DAE0000000", - "generated_rewards": "0x0" + "part_one" : { + "block_num" : -1 + }, + "part_two" : { + "block_num" : -1, + "max_supply": "0x26C62AD77DC602DAE0000000", + "generated_rewards": "0x0" + } } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json index 6f04c36ac5..9c40028dab 100644 --- a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json +++ 
b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json @@ -1642,9 +1642,14 @@ "jail_time": 163459 }, "aspen_hf" : { - "block_num" : -1, - "max_supply": "0x26C62AD77DC602DAE0000000", - "generated_rewards": "0x0" + "part_one" : { + "block_num" : -1 + }, + "part_two" : { + "block_num" : -1, + "max_supply": "0x26C62AD77DC602DAE0000000", + "generated_rewards": "0x0" + } } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index 1104d33c47..b1028b353b 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -142,9 +142,14 @@ "jail_time": 163459 }, "aspen_hf" : { - "block_num" : -1, - "max_supply": "0x26C62AD77DC602DAE0000000", - "generated_rewards": "0x0" + "part_one" : { + "block_num" : -1 + }, + "part_two" : { + "block_num" : -1, + "max_supply": "0x26C62AD77DC602DAE0000000", + "generated_rewards": "0x0" + } } } } diff --git a/libraries/config/include/config/hardfork.hpp b/libraries/config/include/config/hardfork.hpp index 00b7ac73fa..19d77c2e1f 100644 --- a/libraries/config/include/config/hardfork.hpp +++ b/libraries/config/include/config/hardfork.hpp @@ -23,7 +23,15 @@ struct MagnoliaHardfork { Json::Value enc_json(const MagnoliaHardfork& obj); void dec_json(const Json::Value& json, MagnoliaHardfork& obj); -struct AspenHardfork { +struct AspenHfPartOne { + uint64_t block_num = -1; + + HAS_RLP_FIELDS +}; +Json::Value enc_json(const AspenHfPartOne& obj); +void dec_json(const Json::Value& json, AspenHfPartOne& obj); + +struct AspenHfPartTwo { uint64_t block_num = -1; taraxa::uint256_t max_supply{"0x26C62AD77DC602DAE0000000"}; // 12 Billion // total generated rewards from block 1 to block_num @@ -32,6 +40,17 @@ struct AspenHardfork { HAS_RLP_FIELDS }; +Json::Value enc_json(const AspenHfPartTwo& obj); +void dec_json(const 
Json::Value& json, AspenHfPartTwo& obj); + +struct AspenHardfork { + // Part 1 prepares db data that are required for part 2 to be functional + AspenHfPartOne part_one; + // Part 2 implements new yield curve + AspenHfPartTwo part_two; + + HAS_RLP_FIELDS +}; Json::Value enc_json(const AspenHardfork& obj); void dec_json(const Json::Value& json, AspenHardfork& obj); diff --git a/libraries/config/src/hardfork.cpp b/libraries/config/src/hardfork.cpp index 4109f34d10..d9a44973e9 100644 --- a/libraries/config/src/hardfork.cpp +++ b/libraries/config/src/hardfork.cpp @@ -29,7 +29,18 @@ void dec_json(const Json::Value& json, MagnoliaHardfork& obj) { } RLP_FIELDS_DEFINE(MagnoliaHardfork, block_num, jail_time) -Json::Value enc_json(const AspenHardfork& obj) { +Json::Value enc_json(const AspenHfPartOne& obj) { + Json::Value json(Json::objectValue); + json["block_num"] = dev::toJS(obj.block_num); + return json; +} + +void dec_json(const Json::Value& json, AspenHfPartOne& obj) { + obj.block_num = json["block_num"].isUInt64() ? dev::getUInt(json["block_num"]) : uint64_t(-1); +} +RLP_FIELDS_DEFINE(AspenHfPartOne, block_num) + +Json::Value enc_json(const AspenHfPartTwo& obj) { Json::Value json(Json::objectValue); json["block_num"] = dev::toJS(obj.block_num); json["max_supply"] = dev::toJS(obj.max_supply); @@ -37,12 +48,25 @@ Json::Value enc_json(const AspenHardfork& obj) { return json; } -void dec_json(const Json::Value& json, AspenHardfork& obj) { +void dec_json(const Json::Value& json, AspenHfPartTwo& obj) { obj.block_num = json["block_num"].isUInt64() ? 
dev::getUInt(json["block_num"]) : uint64_t(-1); obj.max_supply = dev::jsToU256(json["max_supply"].asString()); obj.generated_rewards = dev::jsToU256(json["generated_rewards"].asString()); } -RLP_FIELDS_DEFINE(AspenHardfork, block_num, max_supply, generated_rewards) +RLP_FIELDS_DEFINE(AspenHfPartTwo, block_num, max_supply, generated_rewards) + +Json::Value enc_json(const AspenHardfork& obj) { + Json::Value json(Json::objectValue); + json["part_one"] = enc_json(obj.part_one); + json["part_two"] = enc_json(obj.part_two); + return json; +} + +void dec_json(const Json::Value& json, AspenHardfork& obj) { + dec_json(json["part_one"], obj.part_one); + dec_json(json["part_two"], obj.part_two); +} +RLP_FIELDS_DEFINE(AspenHardfork, part_one, part_two) Json::Value enc_json(const HardforksConfig& obj) { Json::Value json(Json::objectValue); diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index a28555848f..ec1127a57d 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit a28555848f6437161f1f4d352adb7370c4d0f93e +Subproject commit ec1127a57d200ddbb1e73c544bdc38be122d7f1b diff --git a/tests/test_util/src/test_util.cpp b/tests/test_util/src/test_util.cpp index 536edafc22..95e6292612 100644 --- a/tests/test_util/src/test_util.cpp +++ b/tests/test_util/src/test_util.cpp @@ -275,7 +275,7 @@ std::vector NodesTest::make_node_cfgs(size_t total_count const taraxa::uint256_t yield{20}; // [%] const taraxa::uint256_t hundred{100}; const taraxa::uint256_t num_of_nodes{total_count}; - const taraxa::uint256_t max_supply = ret_configs.back().genesis.state.hardforks.aspen_hf.max_supply; + const taraxa::uint256_t max_supply = ret_configs.back().genesis.state.hardforks.aspen_hf.part_two.max_supply; const taraxa::uint256_t init_balance = (hundred * max_supply) / ((hundred + yield) * num_of_nodes); From 533e091f66ad27920fac90f5963f59247901dffc Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Fri, 1 Dec 2023 17:39:08 -0800 Subject: [PATCH 45/72] 
adjust aspen hf config + update evm --- .../config_jsons/default/default_genesis.json | 13 +++---- .../config_jsons/devnet/devnet_genesis.json | 13 +++---- .../config_jsons/mainnet/mainnet_genesis.json | 13 +++---- .../config_jsons/testnet/testnet_genesis.json | 13 +++---- libraries/config/include/config/hardfork.hpp | 25 +++--------- libraries/config/src/hardfork.cpp | 38 +++++-------------- submodules/libBLS | 1 - tests/test_util/src/test_util.cpp | 2 +- 8 files changed, 35 insertions(+), 83 deletions(-) delete mode 160000 submodules/libBLS diff --git a/libraries/cli/include/cli/config_jsons/default/default_genesis.json b/libraries/cli/include/cli/config_jsons/default/default_genesis.json index 7853c54c72..88206902e9 100644 --- a/libraries/cli/include/cli/config_jsons/default/default_genesis.json +++ b/libraries/cli/include/cli/config_jsons/default/default_genesis.json @@ -107,14 +107,11 @@ "jail_time": 163459 }, "aspen_hf" : { - "part_one" : { - "block_num" : 0 - }, - "part_two" : { - "block_num" : 0, - "max_supply": "0x26C62AD77DC602DAE0000000", - "generated_rewards": "0x0" - } + "block_num_part_one" : 0, + "block_num_part_one" : 0, + "block_num" : 0, + "max_supply": "0x26C62AD77DC602DAE0000000", + "generated_rewards": "0x0" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json index 9337716ca9..bb2b7d42b0 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json @@ -273,14 +273,11 @@ "jail_time": 1000 }, "aspen_hf" : { - "part_one" : { - "block_num" : -1 - }, - "part_two" : { - "block_num" : -1, - "max_supply": "0x26C62AD77DC602DAE0000000", - "generated_rewards": "0x0" - } + "block_num_part_one" : -1, + "block_num_part_one" : -1, + "block_num" : 0, + "max_supply": "0x26C62AD77DC602DAE0000000", + "generated_rewards": "0x0" } } } \ No newline at 
end of file diff --git a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json index 9c40028dab..035d0951bf 100644 --- a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json @@ -1642,14 +1642,11 @@ "jail_time": 163459 }, "aspen_hf" : { - "part_one" : { - "block_num" : -1 - }, - "part_two" : { - "block_num" : -1, - "max_supply": "0x26C62AD77DC602DAE0000000", - "generated_rewards": "0x0" - } + "block_num_part_one" : -1, + "block_num_part_one" : -1, + "block_num" : 0, + "max_supply": "0x26C62AD77DC602DAE0000000", + "generated_rewards": "0x0" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index b1028b353b..c8da27fe39 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -142,14 +142,11 @@ "jail_time": 163459 }, "aspen_hf" : { - "part_one" : { - "block_num" : -1 - }, - "part_two" : { - "block_num" : -1, - "max_supply": "0x26C62AD77DC602DAE0000000", - "generated_rewards": "0x0" - } + "block_num_part_one" : -1, + "block_num_part_one" : -1, + "block_num" : 0, + "max_supply": "0x26C62AD77DC602DAE0000000", + "generated_rewards": "0x0" } } } diff --git a/libraries/config/include/config/hardfork.hpp b/libraries/config/include/config/hardfork.hpp index 19d77c2e1f..46a00e1bd3 100644 --- a/libraries/config/include/config/hardfork.hpp +++ b/libraries/config/include/config/hardfork.hpp @@ -23,16 +23,12 @@ struct MagnoliaHardfork { Json::Value enc_json(const MagnoliaHardfork& obj); void dec_json(const Json::Value& json, MagnoliaHardfork& obj); -struct AspenHfPartOne { - uint64_t block_num = -1; - - HAS_RLP_FIELDS -}; -Json::Value enc_json(const AspenHfPartOne& obj); -void 
dec_json(const Json::Value& json, AspenHfPartOne& obj); +struct AspenHardfork { + // Part 1 prepares db data that are required for part 2 to be functional + uint64_t block_num_part_one{0}; + // Part 2 implements new yield curve + uint64_t block_num_part_two{0}; -struct AspenHfPartTwo { - uint64_t block_num = -1; taraxa::uint256_t max_supply{"0x26C62AD77DC602DAE0000000"}; // 12 Billion // total generated rewards from block 1 to block_num // It is partially estimated for blocks between the aspen hf release block and actual aspen hf block_num @@ -40,17 +36,6 @@ struct AspenHfPartTwo { HAS_RLP_FIELDS }; -Json::Value enc_json(const AspenHfPartTwo& obj); -void dec_json(const Json::Value& json, AspenHfPartTwo& obj); - -struct AspenHardfork { - // Part 1 prepares db data that are required for part 2 to be functional - AspenHfPartOne part_one; - // Part 2 implements new yield curve - AspenHfPartTwo part_two; - - HAS_RLP_FIELDS -}; Json::Value enc_json(const AspenHardfork& obj); void dec_json(const Json::Value& json, AspenHardfork& obj); diff --git a/libraries/config/src/hardfork.cpp b/libraries/config/src/hardfork.cpp index d9a44973e9..60e9d4e7bc 100644 --- a/libraries/config/src/hardfork.cpp +++ b/libraries/config/src/hardfork.cpp @@ -29,44 +29,24 @@ void dec_json(const Json::Value& json, MagnoliaHardfork& obj) { } RLP_FIELDS_DEFINE(MagnoliaHardfork, block_num, jail_time) -Json::Value enc_json(const AspenHfPartOne& obj) { - Json::Value json(Json::objectValue); - json["block_num"] = dev::toJS(obj.block_num); - return json; -} - -void dec_json(const Json::Value& json, AspenHfPartOne& obj) { - obj.block_num = json["block_num"].isUInt64() ? 
dev::getUInt(json["block_num"]) : uint64_t(-1); -} -RLP_FIELDS_DEFINE(AspenHfPartOne, block_num) - -Json::Value enc_json(const AspenHfPartTwo& obj) { +Json::Value enc_json(const AspenHardfork& obj) { Json::Value json(Json::objectValue); - json["block_num"] = dev::toJS(obj.block_num); + json["block_num_part_one"] = dev::toJS(obj.block_num_part_one); + json["block_num_part_two"] = dev::toJS(obj.block_num_part_two); json["max_supply"] = dev::toJS(obj.max_supply); json["generated_rewards"] = dev::toJS(obj.generated_rewards); return json; } -void dec_json(const Json::Value& json, AspenHfPartTwo& obj) { - obj.block_num = json["block_num"].isUInt64() ? dev::getUInt(json["block_num"]) : uint64_t(-1); +void dec_json(const Json::Value& json, AspenHardfork& obj) { + obj.block_num_part_one = + json["block_num_part_one"].isUInt64() ? dev::getUInt(json["block_num_part_one"]) : uint64_t(-1); + obj.block_num_part_two = + json["block_num_part_two"].isUInt64() ? dev::getUInt(json["block_num_part_two"]) : uint64_t(-1); obj.max_supply = dev::jsToU256(json["max_supply"].asString()); obj.generated_rewards = dev::jsToU256(json["generated_rewards"].asString()); } -RLP_FIELDS_DEFINE(AspenHfPartTwo, block_num, max_supply, generated_rewards) - -Json::Value enc_json(const AspenHardfork& obj) { - Json::Value json(Json::objectValue); - json["part_one"] = enc_json(obj.part_one); - json["part_two"] = enc_json(obj.part_two); - return json; -} - -void dec_json(const Json::Value& json, AspenHardfork& obj) { - dec_json(json["part_one"], obj.part_one); - dec_json(json["part_two"], obj.part_two); -} -RLP_FIELDS_DEFINE(AspenHardfork, part_one, part_two) +RLP_FIELDS_DEFINE(AspenHardfork, block_num_part_one, block_num_part_two, max_supply, generated_rewards) Json::Value enc_json(const HardforksConfig& obj) { Json::Value json(Json::objectValue); diff --git a/submodules/libBLS b/submodules/libBLS deleted file mode 160000 index 6fb4d5c5a1..0000000000 --- a/submodules/libBLS +++ /dev/null @@ -1 +0,0 @@ 
-Subproject commit 6fb4d5c5a1af80bcca82b95fc30e01b65c7cf235 diff --git a/tests/test_util/src/test_util.cpp b/tests/test_util/src/test_util.cpp index 95e6292612..536edafc22 100644 --- a/tests/test_util/src/test_util.cpp +++ b/tests/test_util/src/test_util.cpp @@ -275,7 +275,7 @@ std::vector NodesTest::make_node_cfgs(size_t total_count const taraxa::uint256_t yield{20}; // [%] const taraxa::uint256_t hundred{100}; const taraxa::uint256_t num_of_nodes{total_count}; - const taraxa::uint256_t max_supply = ret_configs.back().genesis.state.hardforks.aspen_hf.part_two.max_supply; + const taraxa::uint256_t max_supply = ret_configs.back().genesis.state.hardforks.aspen_hf.max_supply; const taraxa::uint256_t init_balance = (hundred * max_supply) / ((hundred + yield) * num_of_nodes); From f45e07172d79f028b31b34092788d3473a3fbdd7 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Wed, 6 Dec 2023 11:43:16 -0800 Subject: [PATCH 46/72] set aspen hardfork config values for devnet testing --- .../include/cli/config_jsons/default/default_genesis.json | 3 +-- .../include/cli/config_jsons/devnet/devnet_genesis.json | 7 +++---- .../include/cli/config_jsons/mainnet/mainnet_genesis.json | 3 +-- .../include/cli/config_jsons/testnet/testnet_genesis.json | 3 +-- 4 files changed, 6 insertions(+), 10 deletions(-) diff --git a/libraries/cli/include/cli/config_jsons/default/default_genesis.json b/libraries/cli/include/cli/config_jsons/default/default_genesis.json index 88206902e9..e5363b8959 100644 --- a/libraries/cli/include/cli/config_jsons/default/default_genesis.json +++ b/libraries/cli/include/cli/config_jsons/default/default_genesis.json @@ -108,8 +108,7 @@ }, "aspen_hf" : { "block_num_part_one" : 0, - "block_num_part_one" : 0, - "block_num" : 0, + "block_num_part_two" : 0, "max_supply": "0x26C62AD77DC602DAE0000000", "generated_rewards": "0x0" } diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json 
index bb2b7d42b0..7977554161 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json @@ -273,11 +273,10 @@ "jail_time": 1000 }, "aspen_hf" : { - "block_num_part_one" : -1, - "block_num_part_one" : -1, - "block_num" : 0, + "block_num_part_one" : 1000, + "block_num_part_two" : 2000, "max_supply": "0x26C62AD77DC602DAE0000000", - "generated_rewards": "0x0" + "generated_rewards": "0x16E59F7481A7EC1F60" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json index 035d0951bf..eb669ffc1a 100644 --- a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json @@ -1643,8 +1643,7 @@ }, "aspen_hf" : { "block_num_part_one" : -1, - "block_num_part_one" : -1, - "block_num" : 0, + "block_num_part_two" : -1, "max_supply": "0x26C62AD77DC602DAE0000000", "generated_rewards": "0x0" } diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index c8da27fe39..3a7e7fc7e7 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -143,8 +143,7 @@ }, "aspen_hf" : { "block_num_part_one" : -1, - "block_num_part_one" : -1, - "block_num" : 0, + "block_num_part_two" : -1, "max_supply": "0x26C62AD77DC602DAE0000000", "generated_rewards": "0x0" } From 91a3e452d0db10d330e65a627f525c8b7c4796ec Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 21 Dec 2023 12:52:15 +0100 Subject: [PATCH 47/72] update evm --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index ec1127a57d..e2482ac2c8 160000 --- a/submodules/taraxa-evm +++ 
b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit ec1127a57d200ddbb1e73c544bdc38be122d7f1b +Subproject commit e2482ac2c8a17857a8d7557a1ce4edc626a4842e From 298ecf73821259dc01ee5e892598b097ed43ec8d Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Wed, 20 Dec 2023 13:19:09 +0100 Subject: [PATCH 48/72] chore: increase gas limit on devnet --- .../config_jsons/devnet/devnet_genesis.json | 4 +-- .../consensus/src/pbft/pbft_manager.cpp | 29 +++++++++++++++++++ 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json index 7977554161..741e35d766 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json @@ -237,13 +237,13 @@ "dag_blocks_size": "0x32", "ghost_path_move_back": "0x0", "lambda_ms": "0x5DC", - "gas_limit": "0x12C684C0" + "gas_limit": "0x3E95BA80" }, "dag": { "block_proposer": { "shard": 1 }, - "gas_limit": "0x1E0A6E0" + "gas_limit": "0x6422C40" }, "sortition": { "changes_count_for_average": 10, diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index b8e1682f21..00cf742dbe 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -1690,6 +1690,35 @@ std::optional>>> PbftMan return std::nullopt; } + // Get all the ordered unique non-finalized transactions which should match period_data.transactions + std::unordered_set trx_set; + std::vector transactions_to_query; + for (auto const &dag_block : period_data.dag_blocks) { + for (auto const &trx_hash : dag_block.getTrxs()) { + if (trx_set.insert(trx_hash).second) { + transactions_to_query.emplace_back(trx_hash); + } + } + } + auto non_finalized_transactions = trx_mgr_->excludeFinalizedTransactions(transactions_to_query); + + if 
(non_finalized_transactions.size() != period_data.transactions.size()) { + LOG(log_er_) << "Synced PBFT block " << pbft_block_hash << " transactions count " << period_data.transactions.size() + << " incorrect, expected: " << non_finalized_transactions.size(); + sync_queue_.clear(); + net->handleMaliciousSyncPeer(node_id); + return std::nullopt; + } + for (uint32_t i = 0; i < period_data.transactions.size(); i++) { + if (!non_finalized_transactions.contains(period_data.transactions[i]->getHash())) { + LOG(log_er_) << "Synced PBFT block " << pbft_block_hash << " has incorrect transaction " + << period_data.transactions[i]->getHash(); + sync_queue_.clear(); + net->handleMaliciousSyncPeer(node_id); + return std::nullopt; + } + } + return std::optional>>>( {std::move(period_data), std::move(cert_votes)}); } From eba6969e4a7d7c75622b3cb058f479a9a425ff2b Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Thu, 21 Dec 2023 13:58:29 +0100 Subject: [PATCH 49/72] set release 1.6.0 config for testnet --- CMakeLists.txt | 4 ++-- .../cli/include/cli/config_jsons/testnet/testnet_genesis.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index bfcd09a7aa..bb2972ee25 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,8 +2,8 @@ cmake_minimum_required(VERSION 3.20) # Set current version of the project set(TARAXA_MAJOR_VERSION 1) -set(TARAXA_MINOR_VERSION 5) -set(TARAXA_PATCH_VERSION 1) +set(TARAXA_MINOR_VERSION 6) +set(TARAXA_PATCH_VERSION 0) set(TARAXA_VERSION ${TARAXA_MAJOR_VERSION}.${TARAXA_MINOR_VERSION}.${TARAXA_PATCH_VERSION}) # Any time a change in the network protocol is introduced this version should be increased diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index 3a7e7fc7e7..19aed9409d 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ 
b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -142,7 +142,7 @@ "jail_time": 163459 }, "aspen_hf" : { - "block_num_part_one" : -1, + "block_num_part_one" : 1000, "block_num_part_two" : -1, "max_supply": "0x26C62AD77DC602DAE0000000", "generated_rewards": "0x0" From f1fdb1661b607e79cc322a3824d910e8fddd8b5f Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Fri, 22 Dec 2023 11:11:49 +0100 Subject: [PATCH 50/72] set timestamp to Friday 22. December 2023 11:00:00 GMT --- .../cli/include/cli/config_jsons/testnet/testnet_genesis.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index 19aed9409d..2cc401b560 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -4,7 +4,7 @@ "level": "0x0", "pivot": "0x0000000000000000000000000000000000000000000000000000000000000000", "sig": "0xb7e22d46c1ba94d5e8347b01d137b5c428fcbbeaf0a77fb024cbbf1517656ff00d04f7f25be608c321b0d7483c402c294ff46c49b265305d046a52236c0a363701", - "timestamp": "0x6527DFC0", + "timestamp": "0x65856c30", "tips": [], "transactions": [] }, From 34f50417af9c69dc2bfa597fc6fc80baf9c68b93 Mon Sep 17 00:00:00 2001 From: kstdl Date: Wed, 27 Dec 2023 16:10:54 +0100 Subject: [PATCH 51/72] Revert "chore: optimize dag block save" This reverts commit 55a9a6ebbc6001d966d1713c17eec36b8435bd67. 
--- .../src/transaction/transaction_manager.cpp | 20 +++++++++++-------- libraries/core_libs/node/src/node.cpp | 2 -- .../storage/include/storage/storage.hpp | 1 + libraries/core_libs/storage/src/storage.cpp | 11 ++++++++++ tests/transaction_test.cpp | 6 +++--- 5 files changed, 27 insertions(+), 13 deletions(-) diff --git a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp index bc78695816..1a2d62d2b2 100644 --- a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp +++ b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp @@ -166,24 +166,28 @@ void TransactionManager::saveTransactionsFromDagBlock(SharedTransactions const & std::vector accepted_transactions; accepted_transactions.reserve(trxs.size()); auto write_batch = db_->createWriteBatch(); + vec_trx_t trx_hashes; + std::transform(trxs.begin(), trxs.end(), std::back_inserter(trx_hashes), + [](std::shared_ptr const &t) { return t->getHash(); }); + { - // This lock synchronizes removing transactions from transactions memory pool with database insertion. + // This lock synchronizes inserting and removing transactions from transactions memory pool with database insertion. 
// Unique lock here makes sure that transactions we are removing are not reinserted in transactions_pool_ std::unique_lock transactions_lock(transactions_mutex_); - for (const auto &trx : trxs) { - auto const &trx_hash = trx->getHash(); + auto trx_in_db = db_->transactionsInDb(trx_hashes); + for (uint64_t i = 0; i < trxs.size(); i++) { + auto const &trx_hash = trx_hashes[i]; // We only save transaction if it has not already been saved + if (!trx_in_db[i]) { + db_->addTransactionToBatch(*trxs[i], write_batch); + nonfinalized_transactions_in_dag_.emplace(trx_hash, trxs[i]); + } if (transactions_pool_.erase(trx_hash)) { LOG(log_dg_) << "Transaction " << trx_hash << " removed from trx pool "; // Transactions are counted when included in DAG trx_count_++; accepted_transactions.emplace_back(trx_hash); - if (nonfinalized_transactions_in_dag_.emplace(trx_hash, trx).second) { - db_->addTransactionToBatch(*trx, write_batch); - } else { - LOG(log_er_) << "Transaction " << trx_hash << " removed from transaction pool multiple times"; - } } } db_->addStatusFieldToBatch(StatusDbField::TrxCount, trx_count_, write_batch); diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index 054c285ec6..a95d68b421 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -426,8 +426,6 @@ void FullNode::rebuildDb() { } stop_async = true; fut.wait(); - // Handles the race case if some blocks are still in the queue - pbft_mgr_->pushSyncedPbftBlocksIntoChain(); LOG(log_si_) << "Rebuild completed"; } diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index ed2d84b803..7e66789733 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -238,6 +238,7 @@ class DbStorage : public std::enable_shared_from_this { SharedTransactions getAllNonfinalizedTransactions(); bool 
transactionInDb(trx_hash_t const& hash); bool transactionFinalized(trx_hash_t const& hash); + std::vector transactionsInDb(std::vector const& trx_hashes); std::vector transactionsFinalized(std::vector const& trx_hashes); void addTransactionToBatch(Transaction const& trx, Batch& write_batch); void removeTransactionToBatch(trx_hash_t const& trx, Batch& write_batch); diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index 348348b244..048290e626 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -871,6 +871,17 @@ bool DbStorage::transactionFinalized(trx_hash_t const& hash) { return exist(toSlice(hash.asBytes()), Columns::trx_period); } +std::vector DbStorage::transactionsInDb(std::vector const& trx_hashes) { + std::vector result(trx_hashes.size(), false); + for (size_t i = 0; i < trx_hashes.size(); ++i) { + const auto key = trx_hashes[i].asBytes(); + if (exist(toSlice(key), Columns::transactions) || exist(toSlice(key), Columns::trx_period)) { + result[i] = true; + } + } + return result; +} + uint64_t DbStorage::getStatusField(StatusDbField const& field) { auto status = lookup(toSlice((uint8_t)field), Columns::status); if (!status.empty()) { diff --git a/tests/transaction_test.cpp b/tests/transaction_test.cpp index 4d6bd8e47f..53637d89a0 100644 --- a/tests/transaction_test.cpp +++ b/tests/transaction_test.cpp @@ -258,10 +258,10 @@ TEST_F(TransactionTest, transaction_concurrency) { TransactionManager trx_mgr(cfg, db, NewFinalChain(db, cfg), addr_t()); bool stopped = false; // Insert transactions to memory pool and keep trying to insert them again on separate thread, it should always fail - for (auto const& t : *g_signed_trx_samples) { - trx_mgr.insertTransaction(t); - } std::thread insertTrx([&trx_mgr, &stopped]() { + for (auto const& t : *g_signed_trx_samples) { + trx_mgr.insertTransaction(t); + } while (!stopped) { for (auto const& t : *g_signed_trx_samples) 
{ EXPECT_FALSE(trx_mgr.insertTransaction(t).first); From 66540f7b2f3bb5447ac835bf83355bf2a7a52256 Mon Sep 17 00:00:00 2001 From: kstdl Date: Wed, 27 Dec 2023 16:15:02 +0100 Subject: [PATCH 52/72] chore: update testnet gensis timestamp --- .../cli/config_jsons/testnet/testnet_genesis.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index 2cc401b560..c823e30132 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -4,7 +4,7 @@ "level": "0x0", "pivot": "0x0000000000000000000000000000000000000000000000000000000000000000", "sig": "0xb7e22d46c1ba94d5e8347b01d137b5c428fcbbeaf0a77fb024cbbf1517656ff00d04f7f25be608c321b0d7483c402c294ff46c49b265305d046a52236c0a363701", - "timestamp": "0x65856c30", + "timestamp": "0x658C3F5E", "tips": [], "transactions": [] }, @@ -141,11 +141,11 @@ "block_num": 297000, "jail_time": 163459 }, - "aspen_hf" : { - "block_num_part_one" : 1000, - "block_num_part_two" : -1, + "aspen_hf": { + "block_num_part_one": 1000, + "block_num_part_two": -1, "max_supply": "0x26C62AD77DC602DAE0000000", "generated_rewards": "0x0" } } -} +} \ No newline at end of file From 334d5309b2765a5d0aeb550407dd02514626bb1d Mon Sep 17 00:00:00 2001 From: kstdl Date: Tue, 2 Jan 2024 15:26:51 +0100 Subject: [PATCH 53/72] fix: clang format config --- .clang-format | 2 +- .dockerignore | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.clang-format b/.clang-format index c2620f538c..623bc87195 100644 --- a/.clang-format +++ b/.clang-format @@ -2,5 +2,5 @@ Language: Cpp BasedOnStyle: Google ColumnLimit: 120 -... 
+--- diff --git a/.dockerignore b/.dockerignore index a97cbbc71d..0bdb8b8f6b 100644 --- a/.dockerignore +++ b/.dockerignore @@ -6,6 +6,7 @@ !CMakeModules !tests !clang-format +!.clang-format !.gitmodules !CMakeLists.txt !LICENSE From 86939ac8dbaab6a8e36870756e0eeca54375d573 Mon Sep 17 00:00:00 2001 From: kstdl Date: Tue, 2 Jan 2024 15:48:40 +0100 Subject: [PATCH 54/72] fix: EthFace formatting --- libraries/core_libs/network/rpc/EthFace.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/libraries/core_libs/network/rpc/EthFace.h b/libraries/core_libs/network/rpc/EthFace.h index 9309384223..5922368293 100644 --- a/libraries/core_libs/network/rpc/EthFace.h +++ b/libraries/core_libs/network/rpc/EthFace.h @@ -111,9 +111,10 @@ class EthFace : public ServerInterface { &taraxa::net::EthFace::eth_sendRawTransactionI); this->bindAndAddMethod(jsonrpc::Procedure("eth_syncing", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL), &taraxa::net::EthFace::eth_syncingI); - this->bindAndAddMethod(jsonrpc::Procedure("eth_estimateGas", jsonrpc::PARAMS_BY_POSITION_WITH_OPTIONAL, jsonrpc::JSON_STRING, - "param1", jsonrpc::JSON_OBJECT, "param2", JSON_ANY, NULL), - &taraxa::net::EthFace::eth_estimateGasI); + this->bindAndAddMethod( + jsonrpc::Procedure("eth_estimateGas", jsonrpc::PARAMS_BY_POSITION_WITH_OPTIONAL, jsonrpc::JSON_STRING, "param1", + jsonrpc::JSON_OBJECT, "param2", JSON_ANY, NULL), + &taraxa::net::EthFace::eth_estimateGasI); this->bindAndAddMethod(jsonrpc::Procedure("eth_chainId", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL), &taraxa::net::EthFace::eth_chainIdI); } From 7330ffa5869b57593c81e9a42712dc9c8b15ad65 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Thu, 21 Dec 2023 13:58:29 +0100 Subject: [PATCH 55/72] set release 1.6.0 config for testnet --- CMakeLists.txt | 4 ++-- .../cli/include/cli/config_jsons/testnet/testnet_genesis.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt 
b/CMakeLists.txt index cd796af9cd..bb2972ee25 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,8 +2,8 @@ cmake_minimum_required(VERSION 3.20) # Set current version of the project set(TARAXA_MAJOR_VERSION 1) -set(TARAXA_MINOR_VERSION 5) -set(TARAXA_PATCH_VERSION 3) +set(TARAXA_MINOR_VERSION 6) +set(TARAXA_PATCH_VERSION 0) set(TARAXA_VERSION ${TARAXA_MAJOR_VERSION}.${TARAXA_MINOR_VERSION}.${TARAXA_PATCH_VERSION}) # Any time a change in the network protocol is introduced this version should be increased diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index 3a7e7fc7e7..19aed9409d 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -142,7 +142,7 @@ "jail_time": 163459 }, "aspen_hf" : { - "block_num_part_one" : -1, + "block_num_part_one" : 1000, "block_num_part_two" : -1, "max_supply": "0x26C62AD77DC602DAE0000000", "generated_rewards": "0x0" From c558435ea3c98acbfddc4ffa233ea881d5f9e130 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Fri, 22 Dec 2023 11:11:49 +0100 Subject: [PATCH 56/72] set timestamp to Friday 22. 
December 2023 11:00:00 GMT --- .../cli/include/cli/config_jsons/testnet/testnet_genesis.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index 19aed9409d..2cc401b560 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -4,7 +4,7 @@ "level": "0x0", "pivot": "0x0000000000000000000000000000000000000000000000000000000000000000", "sig": "0xb7e22d46c1ba94d5e8347b01d137b5c428fcbbeaf0a77fb024cbbf1517656ff00d04f7f25be608c321b0d7483c402c294ff46c49b265305d046a52236c0a363701", - "timestamp": "0x6527DFC0", + "timestamp": "0x65856c30", "tips": [], "transactions": [] }, From 3c3a1c79142e4dd6bb42eb8fb0b89783fe8daf2b Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 22 Jan 2024 15:24:15 +0100 Subject: [PATCH 57/72] update evm --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 80006ec4a1..c03717cbd4 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 80006ec4a12e5a9a9727a6a79fda1c432a398b48 +Subproject commit c03717cbd49d03a6634d29169156540a112d019a From 2d6e95672076d1b72203746a1b703dce2f224efc Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 24 Jan 2024 15:02:45 +0100 Subject: [PATCH 58/72] chore: rpc should always returns even lenght of hex value --- libraries/aleth/libdevcore/CommonJS.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/libraries/aleth/libdevcore/CommonJS.h b/libraries/aleth/libdevcore/CommonJS.h index d0c17b8595..b68a12b5cb 100644 --- a/libraries/aleth/libdevcore/CommonJS.h +++ b/libraries/aleth/libdevcore/CommonJS.h @@ -47,8 +47,14 @@ std::string toJS(SecureFixedHash const& _i) { template std::string toJS(T const& _i) { 
std::stringstream stream; - stream << "0x" << std::hex << _i; - return stream.str(); + stream << std::hex << _i; + std::string hex = stream.str(); + + // Prepend '0' if the length of the hex string is odd + if (hex.length() % 2 != 0) { + hex = "0" + hex; + } + return "0x" + hex; } enum class OnFailed { InterpretRaw, Empty, Throw }; From ab5d1fd99b7390228560c69fd26c3b59574e82f8 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 24 Jan 2024 15:57:03 +0100 Subject: [PATCH 59/72] Revert "chore: rpc should always returns even lenght of hex value" This reverts commit 2d6e95672076d1b72203746a1b703dce2f224efc. --- libraries/aleth/libdevcore/CommonJS.h | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/libraries/aleth/libdevcore/CommonJS.h b/libraries/aleth/libdevcore/CommonJS.h index b68a12b5cb..d0c17b8595 100644 --- a/libraries/aleth/libdevcore/CommonJS.h +++ b/libraries/aleth/libdevcore/CommonJS.h @@ -47,14 +47,8 @@ std::string toJS(SecureFixedHash const& _i) { template std::string toJS(T const& _i) { std::stringstream stream; - stream << std::hex << _i; - std::string hex = stream.str(); - - // Prepend '0' if the length of the hex string is odd - if (hex.length() % 2 != 0) { - hex = "0" + hex; - } - return "0x" + hex; + stream << "0x" << std::hex << _i; + return stream.str(); } enum class OnFailed { InterpretRaw, Empty, Throw }; From 0a6edd7532ae64a5cddc62d0009a176665e70614 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 24 Jan 2024 16:10:43 +0100 Subject: [PATCH 60/72] chore: fix return value of get storage --- .../core_libs/consensus/include/final_chain/final_chain.hpp | 2 +- .../core_libs/consensus/include/final_chain/state_api.hpp | 2 +- .../core_libs/consensus/src/final_chain/final_chain.cpp | 2 +- libraries/core_libs/consensus/src/final_chain/state_api.cpp | 6 ++++-- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp 
b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp index 30b91bc585..5e029801c1 100644 --- a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp +++ b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp @@ -156,7 +156,7 @@ class FinalChain { * @param blk_n number of block we are getting state from * @return the value at this storage position */ - virtual u256 get_account_storage(addr_t const& addr, u256 const& key, + virtual h256 get_account_storage(addr_t const& addr, u256 const& key, std::optional blk_n = {}) const = 0; /** * @brief Returns code at a given address. diff --git a/libraries/core_libs/consensus/include/final_chain/state_api.hpp b/libraries/core_libs/consensus/include/final_chain/state_api.hpp index a8122436b4..34d56220b5 100644 --- a/libraries/core_libs/consensus/include/final_chain/state_api.hpp +++ b/libraries/core_libs/consensus/include/final_chain/state_api.hpp @@ -37,7 +37,7 @@ class StateAPI { void update_state_config(const Config& new_config); std::optional get_account(EthBlockNumber blk_num, const addr_t& addr) const; - u256 get_account_storage(EthBlockNumber blk_num, const addr_t& addr, const u256& key) const; + h256 get_account_storage(EthBlockNumber blk_num, const addr_t& addr, const u256& key) const; bytes get_code_by_address(EthBlockNumber blk_num, const addr_t& addr) const; ExecutionResult dry_run_transaction(EthBlockNumber blk_num, const EVMBlock& blk, const EVMTransaction& trx) const; bytes trace(EthBlockNumber blk_num, const EVMBlock& blk, const std::vector trx, diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index 4c339f6c5b..b9ae9a2fe2 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -387,7 +387,7 @@ class FinalChainImpl final : public FinalChain { state_api_.update_state_config(new_config); } - u256 
get_account_storage(addr_t const& addr, u256 const& key, + h256 get_account_storage(addr_t const& addr, u256 const& key, std::optional blk_n = {}) const override { return state_api_.get_account_storage(last_if_absent(blk_n), addr, key); } diff --git a/libraries/core_libs/consensus/src/final_chain/state_api.cpp b/libraries/core_libs/consensus/src/final_chain/state_api.cpp index 482084c1d2..116114dfa3 100644 --- a/libraries/core_libs/consensus/src/final_chain/state_api.cpp +++ b/libraries/core_libs/consensus/src/final_chain/state_api.cpp @@ -27,6 +27,8 @@ void to_bytes(taraxa_evm_Bytes b, bytes& result) { result.assign(b.Data, b.Data void to_u256(taraxa_evm_Bytes b, u256& result) { result = fromBigEndian(map_bytes(b)); } +void to_h256(taraxa_evm_Bytes b, h256& result) { result = h256(fromBigEndian(map_bytes(b))); } + template taraxa_evm_BytesCallback decoder_cb_c(Result& res) { return { @@ -147,8 +149,8 @@ std::optional StateAPI::get_account(EthBlockNumber blk_num, const addr_ return c_method_args_rlp, from_rlp, taraxa_evm_state_api_get_account>(this_c_, blk_num, addr); } -u256 StateAPI::get_account_storage(EthBlockNumber blk_num, const addr_t& addr, const u256& key) const { - return c_method_args_rlp(this_c_, blk_num, addr, key); +h256 StateAPI::get_account_storage(EthBlockNumber blk_num, const addr_t& addr, const u256& key) const { + return c_method_args_rlp(this_c_, blk_num, addr, key); } bytes StateAPI::get_code_by_address(EthBlockNumber blk_num, const addr_t& addr) const { From 2c1ef7a0aa94042036dc7a287853d30dbfbbba80 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 24 Jan 2024 16:48:21 +0100 Subject: [PATCH 61/72] chore: fix cpp-check --- CMakeModules/cppcheck.cmake | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeModules/cppcheck.cmake b/CMakeModules/cppcheck.cmake index 2cd752ccc1..1472b400d8 100644 --- a/CMakeModules/cppcheck.cmake +++ b/CMakeModules/cppcheck.cmake @@ -26,6 +26,7 @@ else () 
--suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/vrf_wrapper.cpp --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/UPnP.cpp --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/logger.cpp + --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/state_api.cpp # TODO remove this when we solve correct exit of programs --suppress=localMutex:${PROJECT_SOURCE_DIR}/*/main.cpp # Just style warning From 1800adfacd9fdc588ad221053e1de128085af614 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Wed, 24 Jan 2024 12:56:26 -0800 Subject: [PATCH 62/72] order blocks stats after aspen hardfork rt one --- .../include/rewards/rewards_stats.hpp | 2 +- .../consensus/src/rewards/rewards_stats.cpp | 36 +++++++++++++++---- 2 files changed, 30 insertions(+), 8 deletions(-) diff --git a/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp b/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp index 3449aefc30..b5542bfb09 100644 --- a/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp +++ b/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp @@ -47,7 +47,7 @@ class Stats { void clear(); const uint32_t kCommitteeSize; - const HardforksConfig kHardforks; + const HardforksConfig kHardforksConfig; std::shared_ptr db_; const std::function dpos_eligible_total_vote_count_; std::unordered_map blocks_stats_; diff --git a/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp b/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp index 0fdd8988c9..a272fac71c 100644 --- a/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp +++ b/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp @@ -6,7 +6,7 @@ namespace taraxa::rewards { Stats::Stats(uint32_t committee_size, const HardforksConfig& hardforks, std::shared_ptr db, std::function&& dpos_eligible_total_vote_count) : kCommitteeSize(committee_size), - kHardforks(hardforks), + kHardforksConfig(hardforks), db_(std::move(db)), dpos_eligible_total_vote_count_(dpos_eligible_total_vote_count) { 
loadFromDb(); @@ -28,7 +28,7 @@ void Stats::saveBlockStats(uint64_t period, const BlockStats& stats, DbStorage:: } uint32_t Stats::getCurrentDistributionFrequency(uint64_t current_block) const { - auto distribution_frequencies = kHardforks.rewards_distribution_frequency; + auto distribution_frequencies = kHardforksConfig.rewards_distribution_frequency; auto itr = distribution_frequencies.upper_bound(current_block); if (distribution_frequencies.empty() || itr == distribution_frequencies.begin()) { return 1; @@ -49,7 +49,7 @@ BlockStats Stats::getBlockStats(const PeriodData& blk, const std::vector& if (!blk.previous_block_cert_votes.empty()) [[likely]] { dpos_vote_count = dpos_eligible_total_vote_count_(blk.previous_block_cert_votes[0]->getPeriod() - 1); } - if (blk.pbft_blk->getPeriod() < kHardforks.magnolia_hf.block_num) { + if (blk.pbft_blk->getPeriod() < kHardforksConfig.magnolia_hf.block_num) { return BlockStats{blk, {}, dpos_vote_count, kCommitteeSize}; } @@ -58,10 +58,10 @@ BlockStats Stats::getBlockStats(const PeriodData& blk, const std::vector& std::vector Stats::processStats(const PeriodData& current_blk, const std::vector& trxs_gas_used, DbStorage::Batch& write_batch) { - std::vector res; const auto current_period = current_blk.pbft_blk->getPeriod(); const auto frequency = getCurrentDistributionFrequency(current_period); auto block_stats = getBlockStats(current_blk, trxs_gas_used); + // Distribute rewards every block if (frequency == 1) { return {block_stats}; @@ -75,9 +75,31 @@ std::vector Stats::processStats(const PeriodData& current_blk, const return {}; } - res.reserve(blocks_stats_.size()); - std::transform(blocks_stats_.begin(), blocks_stats_.end(), std::back_inserter(res), - [](auto& t) { return std::move(t.second); }); + // Transform ordered (or unordered) blocks stats into vector + auto transformStatsToVector = [](auto&& blocks_stats) { + std::vector stats_vec; + stats_vec.reserve(blocks_stats.size()); + + 
std::transform(std::make_move_iterator(blocks_stats.begin()), std::make_move_iterator(blocks_stats.end()), + std::back_inserter(stats_vec), [](auto&& t) { return std::move(t.second); }); + return stats_vec; + }; + + std::vector res; + + // Blocks stats were not sorted by period before aspen hardfork part one + if (current_period < kHardforksConfig.aspen_hf.block_num_part_one) { + res = transformStatsToVector(std::move(blocks_stats_)); + } else { + // Blocks stats are sorted by period after aspen hardfork part one + std::map ordered_blocks_stats; + std::transform(std::make_move_iterator(blocks_stats_.begin()), std::make_move_iterator(blocks_stats_.end()), + std::inserter(ordered_blocks_stats, ordered_blocks_stats.end()), + [](auto&& t) { return std::move(t); }); + + res = transformStatsToVector(std::move(ordered_blocks_stats)); + } + clear(); return res; } From 20afac5d82be584b08b15301b1bdea965284ddd5 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Tue, 30 Jan 2024 11:45:13 +0100 Subject: [PATCH 63/72] chore: dag rewards --- libraries/config/include/config/hardfork.hpp | 3 + .../consensus/include/rewards/block_stats.hpp | 20 +++- .../consensus/src/rewards/block_stats.cpp | 38 +++++-- .../consensus/src/rewards/rewards_stats.cpp | 3 +- libraries/core_libs/node/src/node.cpp | 2 + tests/rewards_stats_test.cpp | 99 +++++++++++++++++++ 6 files changed, 156 insertions(+), 9 deletions(-) diff --git a/libraries/config/include/config/hardfork.hpp b/libraries/config/include/config/hardfork.hpp index 62225bacb6..b79e839eb8 100644 --- a/libraries/config/include/config/hardfork.hpp +++ b/libraries/config/include/config/hardfork.hpp @@ -71,6 +71,9 @@ struct HardforksConfig { // Aspen hardfork implements new yield curve AspenHardfork aspen_hf; + bool isAspenHardforkPartOne(uint64_t block_number) const { return block_number >= aspen_hf.block_num_part_one; } + bool isAspenHardforkPartTwo(uint64_t block_number) const { return block_number >= aspen_hf.block_num_part_two; } + 
HAS_RLP_FIELDS }; diff --git a/libraries/core_libs/consensus/include/rewards/block_stats.hpp b/libraries/core_libs/consensus/include/rewards/block_stats.hpp index 1d992600af..d90e032d0d 100644 --- a/libraries/core_libs/consensus/include/rewards/block_stats.hpp +++ b/libraries/core_libs/consensus/include/rewards/block_stats.hpp @@ -22,9 +22,10 @@ class BlockStats { * * @param dpos_vote_count - votes count for previous block * @param committee_size + * @param aspen_dag_rewards - aspen dag rewards */ BlockStats(const PeriodData& block, const std::vector& trxs_gas_used, uint64_t dpos_vote_count, - uint32_t committee_size); + uint32_t committee_size, const bool aspen_dag_rewards = false); HAS_RLP_FIELDS @@ -33,8 +34,23 @@ class BlockStats { * @brief Process PeriodData and save stats in class for future serialization. returns * * @param block + * @param aspen_dag_rewards */ - void processStats(const PeriodData& block); + void processStats(const PeriodData& block, const bool aspen_dag_rewards); + + /** + * @brief Process Dag blocks and save stats in class for future serialization. returns + * + * @param block + */ + void processDagBlocks(const PeriodData& block); + + /** + * @brief Process Dag blocks and save stats in class for future serialization with aspen HF changes. 
returns + * + * @param block + */ + void processDagBlocksAspen(const PeriodData& block); /** * @brief Prepare fee_by_trx_hash_ map with trx fee by trx hash diff --git a/libraries/core_libs/consensus/src/rewards/block_stats.cpp b/libraries/core_libs/consensus/src/rewards/block_stats.cpp index 5888ee4c88..342cce8c8e 100644 --- a/libraries/core_libs/consensus/src/rewards/block_stats.cpp +++ b/libraries/core_libs/consensus/src/rewards/block_stats.cpp @@ -7,11 +7,11 @@ namespace taraxa::rewards { BlockStats::BlockStats(const PeriodData& block, const std::vector& trxs_gas_used, uint64_t dpos_vote_count, - uint32_t committee_size) + uint32_t committee_size, const bool aspen_dag_reward) : block_author_(block.pbft_blk->getBeneficiary()), max_votes_weight_(std::min(committee_size, dpos_vote_count)) { initFeeByTrxHash(block.transactions, trxs_gas_used); - processStats(block); + processStats(block, aspen_dag_reward); } void BlockStats::initFeeByTrxHash(const SharedTransactions& transactions, const std::vector& trxs_gas_used) { @@ -67,13 +67,24 @@ std::set toTrxHashesSet(const SharedTransactions& transactions) { return block_transactions_hashes_; } -void BlockStats::processStats(const PeriodData& block) { +void BlockStats::processStats(const PeriodData& block, const bool aspen_dag_rewards) { // total unique transactions count should be always equal to transactions count in block assert(fee_by_trx_hash_.size() == block.transactions.size()); validators_stats_.reserve(std::max(block.dag_blocks.size(), block.previous_block_cert_votes.size())); - auto block_transactions_hashes_ = toTrxHashesSet(block.transactions); + if (aspen_dag_rewards) { + processDagBlocksAspen(block); + } else { + processDagBlocks(block); + } + for (const auto& vote : block.previous_block_cert_votes) { + addVote(vote); + } +} + +void BlockStats::processDagBlocks(const PeriodData& block) { + auto block_transactions_hashes_ = toTrxHashesSet(block.transactions); for (const auto& dag_block : block.dag_blocks) { 
const addr_t& dag_block_author = dag_block.getSender(); bool has_unique_transactions = false; @@ -94,9 +105,24 @@ void BlockStats::processStats(const PeriodData& block) { total_dag_blocks_count_ += 1; } } +} - for (const auto& vote : block.previous_block_cert_votes) { - addVote(vote); +void BlockStats::processDagBlocksAspen(const PeriodData& block) { + uint32_t min_difficulty = UINT32_MAX; + for (const auto& dag_block : block.dag_blocks) { + if (dag_block.getDifficulty() < min_difficulty) { + min_difficulty = dag_block.getDifficulty(); + } + } + for (const auto& dag_block : block.dag_blocks) { + const addr_t& dag_block_author = dag_block.getSender(); + if (dag_block.getDifficulty() == min_difficulty) { + validators_stats_[dag_block_author].dag_blocks_count_ += 1; + total_dag_blocks_count_ += 1; + } + for (const auto& tx_hash : dag_block.getTrxs()) { + addTransaction(tx_hash, dag_block_author); + } } } diff --git a/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp b/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp index a272fac71c..f27bf3abec 100644 --- a/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp +++ b/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp @@ -53,7 +53,8 @@ BlockStats Stats::getBlockStats(const PeriodData& blk, const std::vector& return BlockStats{blk, {}, dpos_vote_count, kCommitteeSize}; } - return BlockStats{blk, trxs_fees, dpos_vote_count, kCommitteeSize}; + const auto aspen_hf_part_two = kHardforksConfig.isAspenHardforkPartTwo(blk.pbft_blk->getPeriod()); + return BlockStats{blk, trxs_fees, dpos_vote_count, kCommitteeSize, aspen_hf_part_two}; } std::vector Stats::processStats(const PeriodData& current_blk, const std::vector& trxs_gas_used, diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index 374a26310c..a03455db95 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -431,6 +431,8 @@ void FullNode::rebuildDb() { } 
stop_async = true; fut.wait(); + // Handles the race case if some blocks are still in the queue + pbft_mgr_->pushSyncedPbftBlocksIntoChain(); LOG(log_si_) << "Rebuild completed"; } diff --git a/tests/rewards_stats_test.cpp b/tests/rewards_stats_test.cpp index 68b3e69bca..03e2ca7a72 100644 --- a/tests/rewards_stats_test.cpp +++ b/tests/rewards_stats_test.cpp @@ -227,6 +227,105 @@ TEST_F(RewardsStatsTest, feeRewards) { } } +TEST_F(RewardsStatsTest, dagBlockRewards) { + auto db = std::make_shared(data_dir / "db"); + auto batch = db->createWriteBatch(); + + std::vector> empty_votes; + HardforksConfig hfc; + hfc.aspen_hf.block_num_part_two = 4; + + // Create two reward stats to test before and after aspen hardfork part 2 + rewards::Stats pre_aspen_reward_stats(100, HardforksConfig{0, 0, {}, {}, MagnoliaHardfork{0, 0}, AspenHardfork{1, 6}}, + db, [](auto) { return 100; }); + rewards::Stats post_aspen_reward_stats( + 100, HardforksConfig{0, 0, {}, {}, MagnoliaHardfork{0, 0}, AspenHardfork{1, 4}}, db, [](auto) { return 100; }); + + // Create pbft block with 5 dag blocks + auto dag_key1 = dev::KeyPair::create(); + auto dag_key2 = dev::KeyPair::create(); + auto dag_key3 = dev::KeyPair::create(); + auto dag_key4 = dev::KeyPair::create(); + auto dag_key5 = dev::KeyPair::create(); + vrf_wrapper::vrf_sk_t vrfs( + "854821a22e1841f79f0a62409197e930eb347c05ede6456b82b07ec36acbd2fce86c6f2cd1e076ddf8eaf48cee078bd68b74063c3e229b1a" + "5e993c791bdb56d6"); + auto trxs = samples::createSignedTrxSamples(1, 3, g_secret); + + PeriodData block(make_simple_pbft_block(blk_hash_t(1), 5), empty_votes); + SortitionParams sortition_params(0xfff, 16, 21, 23, 0x64); + + vdf_sortition::VdfSortition vdf1(sortition_params, vrfs, + vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(1)), 1, 1); + DagBlock dag_blk1({}, {}, {}, {trxs[0]->getHash()}, 0, vdf1, dag_key1.secret()); + block.dag_blocks.push_back(dag_blk1); + + vdf_sortition::VdfSortition vdf2(sortition_params, vrfs, + 
vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(2)), 1, 1); + DagBlock dag_blk2({}, {}, {}, {trxs[1]->getHash()}, 0, vdf2, dag_key2.secret()); + block.dag_blocks.push_back(dag_blk2); + + vdf_sortition::VdfSortition vdf3(sortition_params, vrfs, + vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(3)), 1, 1); + DagBlock dag_blk3({}, {}, {}, {trxs[0]->getHash()}, 0, vdf3, dag_key3.secret()); + block.dag_blocks.push_back(dag_blk3); + + vdf_sortition::VdfSortition vdf4(sortition_params, vrfs, + vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(4)), 1, 1); + DagBlock dag_blk4({}, {}, {}, {trxs[1]->getHash()}, 0, vdf4, dag_key4.secret()); + block.dag_blocks.push_back(dag_blk4); + + vdf_sortition::VdfSortition vdf5(sortition_params, vrfs, + vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(5)), 1, 1); + DagBlock dag_blk5({}, {}, {}, {trxs[2]->getHash()}, 0, vdf5, dag_key5.secret()); + block.dag_blocks.push_back(dag_blk5); + block.transactions = trxs; + + ASSERT_EQ(dag_blk1.getDifficulty(), 17); + ASSERT_EQ(dag_blk2.getDifficulty(), 17); + ASSERT_EQ(dag_blk3.getDifficulty(), 16); + ASSERT_EQ(dag_blk4.getDifficulty(), 17); + ASSERT_EQ(dag_blk5.getDifficulty(), 16); + + std::vector gas_used{10, 20, 30}; + + // Process rewards before aspen hf, expect dag_blocks_count to match blocks that include unique transactions which is + // blocks 1, 2 and 5 + auto stats = pre_aspen_reward_stats.processStats(block, gas_used, batch); + ASSERT_EQ(stats.size(), 1); + auto stats_with_get = reinterpret_cast(&stats[0]); + ASSERT_EQ(stats_with_get->getValidatorStats().size(), 3); + ASSERT_TRUE(stats_with_get->getValidatorStats().contains(dev::toAddress(dag_key1.pub()))); + ASSERT_TRUE(stats_with_get->getValidatorStats().contains(dev::toAddress(dag_key2.pub()))); + ASSERT_TRUE(stats_with_get->getValidatorStats().contains(dev::toAddress(dag_key5.pub()))); + ASSERT_EQ(stats_with_get->getValidatorStats().find(dev::toAddress(dag_key1.pub()))->second.dag_blocks_count_, 
1); + ASSERT_EQ(stats_with_get->getValidatorStats().find(dev::toAddress(dag_key2.pub()))->second.dag_blocks_count_, 1); + ASSERT_EQ(stats_with_get->getValidatorStats().find(dev::toAddress(dag_key5.pub()))->second.dag_blocks_count_, 1); + + // Process rewards after aspen hf, expect dag_blocks_count to match blocks with smallest difficulty which is blocks 3 + // and 5 Verify fees rewards to be the same before and after the HF + auto post_stats = post_aspen_reward_stats.processStats(block, gas_used, batch); + ASSERT_EQ(post_stats.size(), 1); + auto post_stats_with_get = reinterpret_cast(&post_stats[0]); + ASSERT_EQ(post_stats_with_get->getValidatorStats().size(), 4); + ASSERT_TRUE(post_stats_with_get->getValidatorStats().contains(dev::toAddress(dag_key1.pub()))); + ASSERT_TRUE(post_stats_with_get->getValidatorStats().contains(dev::toAddress(dag_key2.pub()))); + ASSERT_TRUE(post_stats_with_get->getValidatorStats().contains(dev::toAddress(dag_key3.pub()))); + ASSERT_TRUE(post_stats_with_get->getValidatorStats().contains(dev::toAddress(dag_key5.pub()))); + ASSERT_EQ(post_stats_with_get->getValidatorStats().find(dev::toAddress(dag_key1.pub()))->second.dag_blocks_count_, 0); + ASSERT_EQ(post_stats_with_get->getValidatorStats().find(dev::toAddress(dag_key2.pub()))->second.dag_blocks_count_, 0); + ASSERT_EQ(post_stats_with_get->getValidatorStats().find(dev::toAddress(dag_key3.pub()))->second.dag_blocks_count_, 1); + ASSERT_EQ(post_stats_with_get->getValidatorStats().find(dev::toAddress(dag_key5.pub()))->second.dag_blocks_count_, 1); + + ASSERT_EQ(stats_with_get->getValidatorStats().find(dev::toAddress(dag_key1.pub()))->second.fees_rewards_, + post_stats_with_get->getValidatorStats().find(dev::toAddress(dag_key1.pub()))->second.fees_rewards_); + ASSERT_EQ(stats_with_get->getValidatorStats().find(dev::toAddress(dag_key2.pub()))->second.fees_rewards_, + post_stats_with_get->getValidatorStats().find(dev::toAddress(dag_key2.pub()))->second.fees_rewards_); + 
ASSERT_EQ(stats_with_get->getValidatorStats().find(dev::toAddress(dag_key5.pub()))->second.fees_rewards_, + post_stats_with_get->getValidatorStats().find(dev::toAddress(dag_key5.pub()))->second.fees_rewards_); + ASSERT_EQ(post_stats_with_get->getValidatorStats().find(dev::toAddress(dag_key3.pub()))->second.fees_rewards_, 0); +} + } // namespace taraxa::core_tests using namespace taraxa; From 3723e2d9b4b1bf4e867655d2f9de76341b11dd48 Mon Sep 17 00:00:00 2001 From: Leonard Mocanu Date: Tue, 6 Feb 2024 12:58:32 +0200 Subject: [PATCH 64/72] chore: adds automatic external ip detection --- docker-entrypoint.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh index a61dc454ea..1fc521d3e9 100755 --- a/docker-entrypoint.sh +++ b/docker-entrypoint.sh @@ -16,6 +16,9 @@ else if [[ -z "${ADVERTISED_IP}" ]]; then echo "ADVERTISED_IP is not set." else + if [ "$ADVERTISED_IP" = "auto" ]; then + ADVERTISED_IP=$(curl icanhazip.com 2>/dev/null) + fi FLAGS="--public-ip ${ADVERTISED_IP}" fi From 32d15d1fa7c2707610463dcfd138f7fe96bf0c29 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 12 Feb 2024 13:15:40 +0100 Subject: [PATCH 65/72] feat: add support for announcing external port --- docker-entrypoint.sh | 2 +- libraries/aleth/libp2p/Host.cpp | 2 +- libraries/aleth/libp2p/Network.h | 7 +++++-- libraries/aleth/libp2p/NodeTable.cpp | 10 ++++++++-- libraries/aleth/libp2p/NodeTable.h | 15 ++++++++++----- libraries/cli/include/cli/config.hpp | 1 + libraries/cli/src/config.cpp | 4 ++++ libraries/config/include/config/network.hpp | 1 + libraries/core_libs/network/src/network.cpp | 1 + programs/taraxa-bootnode/main.cpp | 7 +++++-- 10 files changed, 37 insertions(+), 13 deletions(-) diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh index 1fc521d3e9..f7ac1e62ab 100755 --- a/docker-entrypoint.sh +++ b/docker-entrypoint.sh @@ -19,7 +19,7 @@ else if [ "$ADVERTISED_IP" = "auto" ]; then ADVERTISED_IP=$(curl icanhazip.com 2>/dev/null) fi - 
FLAGS="--public-ip ${ADVERTISED_IP}" + FLAGS="--public-ip ${ADVERTISED_IP} --public-port" fi ADVERTISED_PORT_NAME="ADVERTISED_PORT_$INDEX" diff --git a/libraries/aleth/libp2p/Host.cpp b/libraries/aleth/libp2p/Host.cpp index 1e773acf73..2067e0f940 100644 --- a/libraries/aleth/libp2p/Host.cpp +++ b/libraries/aleth/libp2p/Host.cpp @@ -83,7 +83,7 @@ Host::Host(std::string _clientVersion, KeyPair const& kp, NetworkConfig _n, Tara m_nodeTable = make_unique( ioc_, m_alias, NodeIPEndpoint(bi::make_address(listenAddress()), listenPort(), listenPort()), updateENR(enr, m_tcpPublic, listenPort()), m_netConfig.discovery, m_netConfig.allowLocalDiscovery, - taraxa_conf_.is_boot_node, taraxa_conf_.chain_id); + taraxa_conf_.is_boot_node, m_netConfig.announcePublicPort, taraxa_conf_.chain_id); m_nodeTable->setEventHandler(new NodeTableEventHandler([this](auto const&... args) { onNodeTableEvent(args...); })); if (restored_state) { for (auto const& node : restored_state->known_nodes) { diff --git a/libraries/aleth/libp2p/Network.h b/libraries/aleth/libp2p/Network.h index 78a2d92d80..5011a9caa3 100644 --- a/libraries/aleth/libp2p/Network.h +++ b/libraries/aleth/libp2p/Network.h @@ -37,12 +37,14 @@ struct NetworkConfig { // Network Preferences with intended Public IP NetworkConfig(std::string const& _publicIP, std::string const& _listenAddress = std::string(), - unsigned short _listenPort = c_defaultListenPort, bool _upnp = true, bool _allowLocalDiscovery = false) + unsigned short _listenPort = c_defaultListenPort, bool _upnp = true, bool _allowLocalDiscovery = false, + bool publicPort = false) : publicIPAddress(_publicIP), listenIPAddress(_listenAddress), listenPort(_listenPort), traverseNAT(_upnp), - allowLocalDiscovery(_allowLocalDiscovery) { + allowLocalDiscovery(_allowLocalDiscovery), + announcePublicPort(publicPort) { if (!publicIPAddress.empty() && !isPublicAddress(publicIPAddress)) BOOST_THROW_EXCEPTION(InvalidPublicIPAddress()); } @@ -58,6 +60,7 @@ struct NetworkConfig { bool 
discovery = true; // Discovery is activated with network. bool allowLocalDiscovery = false; // Include nodes with local IP addresses in the discovery process. bool pin = false; // Only accept or connect to trusted peers. + bool announcePublicPort = false; // Announce public port in PING msg }; /** diff --git a/libraries/aleth/libp2p/NodeTable.cpp b/libraries/aleth/libp2p/NodeTable.cpp index b6e00d7d08..dfb8f3aac4 100644 --- a/libraries/aleth/libp2p/NodeTable.cpp +++ b/libraries/aleth/libp2p/NodeTable.cpp @@ -33,7 +33,8 @@ inline bool operator==(weak_ptr const& _weak, shared_ptr c } NodeTable::NodeTable(ba::io_context& _io, KeyPair const& _alias, NodeIPEndpoint const& _endpoint, ENR const& _enr, - bool _enabled, bool _allowLocalDiscovery, bool is_boot_node, unsigned chain_id) + bool _enabled, bool _allowLocalDiscovery, bool is_boot_node, bool announce_udp_port, + unsigned chain_id) : strand_(ba::make_strand(_io)), m_hostNodeID{_alias.pub()}, m_hostNodeIDHash{sha3(m_hostNodeID)}, @@ -48,7 +49,8 @@ NodeTable::NodeTable(ba::io_context& _io, KeyPair const& _alias, NodeIPEndpoint m_timeoutsTimer{make_shared(_io)}, m_endpointTrackingTimer{make_shared(_io)}, is_boot_node_(is_boot_node), - chain_id_(chain_id) { + chain_id_(chain_id), + announce_upd_port_(announce_udp_port) { if (is_boot_node_) { s_bucketSize = BOOT_NODE_BUCKET_SIZE; } @@ -260,6 +262,7 @@ void NodeTable::ping(Node const& _node, shared_ptr _replacementNodeEn PingNode p{m_hostNodeEndpoint, _node.get_endpoint(), chain_id_}; p.expiration = nextRequestExpirationTime(); p.seq = m_hostENR.sequenceNumber(); + if (announce_upd_port_) p.use_udp_port = true; auto const pingHash = p.sign(m_secret); LOG(m_logger) << p.typeName() << " to " << _node; m_socket->send(p); @@ -568,6 +571,9 @@ NodeIPEndpoint NodeTable::getSourceEndpoint(bi::udp::endpoint const& from, PingN return m_ipMappings[from]; } } + if (packet.use_udp_port.has_value() && packet.use_udp_port) { + return {from.address(), packet.source.udpPort(), 
packet.source.tcpPort()}; + } return {from.address(), from.port(), packet.source.tcpPort()}; } diff --git a/libraries/aleth/libp2p/NodeTable.h b/libraries/aleth/libp2p/NodeTable.h index 5d6f97f480..acfae2a482 100644 --- a/libraries/aleth/libp2p/NodeTable.h +++ b/libraries/aleth/libp2p/NodeTable.h @@ -109,7 +109,8 @@ class NodeTable : UDPSocketEvents { /// Constructor requiring host for I/O, credentials, and IP Address, port to /// listen on and host ENR. NodeTable(ba::io_context& _io, KeyPair const& _alias, NodeIPEndpoint const& _endpoint, ENR const& _enr, - bool _enabled = true, bool _allowLocalDiscovery = false, bool is_boot_node = false, unsigned chain_id = 0); + bool _enabled = true, bool _allowLocalDiscovery = false, bool is_boot_node = false, + bool announce_udp_port = false, unsigned chain_id = 0); ~NodeTable() { if (m_socket->isOpen()) { @@ -384,6 +385,7 @@ class NodeTable : UDPSocketEvents { const bool is_boot_node_ = false; const uint32_t chain_id_ = 0; + const bool announce_upd_port_ = false; }; /** @@ -468,16 +470,18 @@ struct PingNode : DiscoveryDatagram { unsigned chain_id = 0; NodeIPEndpoint source; NodeIPEndpoint destination; - boost::optional seq; + uint64_t seq; + std::optional use_udp_port; void streamRLP(RLPStream& _s) const override { - _s.appendList(seq.is_initialized() ? 5 : 4); + _s.appendList(use_udp_port.has_value() ? 
6 : 5); _s << dev::p2p::c_protocolVersion; _s << chain_id; source.streamRLP(_s); destination.streamRLP(_s); _s << *expiration; - if (seq.is_initialized()) _s << *seq; + _s << seq; + if (use_udp_port.has_value()) _s << *use_udp_port; } void interpretRLP(bytesConstRef _bytes) override { @@ -487,7 +491,8 @@ struct PingNode : DiscoveryDatagram { source.interpretRLP(r[2]); destination.interpretRLP(r[3]); expiration = r[4].toInt(); - if (r.itemCount() > 5 && r[5].isInt()) seq = r[5].toInt(); + seq = r[5].toInt(); + if (r.itemCount() > 6 && r[6].isInt()) use_udp_port = r[6].toInt(); } std::string typeName() const override { return "Ping"; } diff --git a/libraries/cli/include/cli/config.hpp b/libraries/cli/include/cli/config.hpp index 41aa55e7e4..72977cff04 100644 --- a/libraries/cli/include/cli/config.hpp +++ b/libraries/cli/include/cli/config.hpp @@ -48,6 +48,7 @@ class Config { static constexpr const char* CONFIG_COMMAND = "config"; static constexpr const char* BOOT_NODES = "boot-nodes"; static constexpr const char* PUBLIC_IP = "public-ip"; + static constexpr const char* PUBLIC_PORT = "public-port"; static constexpr const char* PORT = "port"; static constexpr const char* LOG_CHANNELS = "log-channels"; static constexpr const char* LOG_CONFIGURATIONS = "log-configurations"; diff --git a/libraries/cli/src/config.cpp b/libraries/cli/src/config.cpp index 3023f3fbbe..ab8c32151a 100644 --- a/libraries/cli/src/config.cpp +++ b/libraries/cli/src/config.cpp @@ -26,6 +26,7 @@ Config::Config(int argc, const char* argv[]) { std::vector command; std::vector boot_nodes; std::string public_ip; + bool public_port = false; uint16_t port = 0; std::vector log_channels; std::vector log_configurations; @@ -108,6 +109,8 @@ Config::Config(int argc, const char* argv[]) { "Boot nodes to connect to in addition to boot nodes defined in config: [ip_address:port_number/node_id, ....]"); node_command_options.add_options()(PUBLIC_IP, bpo::value(&public_ip), "Force advertised public IP to the given IP 
(default: auto)"); + node_command_options.add_options()(PUBLIC_PORT, bpo::bool_switch(&public_port), + "Force advertised public port (default: false)"); node_command_options.add_options()(PORT, bpo::value(&port), "Listen on the given port for incoming connections"); node_command_options.add_options()(LOG_CHANNELS, bpo::value>(&log_channels)->multitoken(), @@ -264,6 +267,7 @@ Config::Config(int argc, const char* argv[]) { } if (!public_ip.empty()) { node_config_.network.public_ip = public_ip; + node_config_.network.public_port = public_port; } if (port) { node_config_.network.listen_port = port; diff --git a/libraries/config/include/config/network.hpp b/libraries/config/include/config/network.hpp index 49253ca962..a863a86797 100644 --- a/libraries/config/include/config/network.hpp +++ b/libraries/config/include/config/network.hpp @@ -61,6 +61,7 @@ struct NetworkConfig { std::string json_file_name; std::string public_ip; + bool public_port = false; std::string listen_ip = "127.0.0.1"; uint16_t listen_port = 0; std::vector boot_nodes; diff --git a/libraries/core_libs/network/src/network.cpp b/libraries/core_libs/network/src/network.cpp index b5ecbed955..f750e052f8 100644 --- a/libraries/core_libs/network/src/network.cpp +++ b/libraries/core_libs/network/src/network.cpp @@ -55,6 +55,7 @@ Network::Network(const FullNodeConfig &config, const h256 &genesis_hash, std::fi net_conf.allowLocalDiscovery = true; net_conf.traverseNAT = false; net_conf.publicIPAddress = config.network.public_ip; + net_conf.announcePublicPort = config.network.public_port; net_conf.pin = false; dev::p2p::TaraxaNetworkConfig taraxa_net_conf; diff --git a/programs/taraxa-bootnode/main.cpp b/programs/taraxa-bootnode/main.cpp index d4ac4b0e64..7b81b3535a 100644 --- a/programs/taraxa-bootnode/main.cpp +++ b/programs/taraxa-bootnode/main.cpp @@ -86,6 +86,7 @@ dev::KeyPair getKey(std::string const& path) { int main(int argc, char** argv) { bool denyLocalDiscovery; + bool public_port; std::string wallet; 
po::options_description general_options("GENERAL OPTIONS", kLineWidth); @@ -100,6 +101,7 @@ int main(int argc, char** argv) { auto addNetworkingOption = client_networking.add_options(); addNetworkingOption("public-ip", po::value()->value_name(""), "Force advertised public IP to the given IP (default: auto)"); + addNetworkingOption("public-port", po::bool_switch(&public_port), "Force advertised public port (default: false)"); addNetworkingOption("listen-ip", po::value()->value_name(""), "Listen on the given IP for incoming connections (default: 0.0.0.0)"); addNetworkingOption("listen", po::value()->value_name(""), @@ -167,8 +169,9 @@ int main(int argc, char** argv) { } } - auto net_conf = public_ip.empty() ? dev::p2p::NetworkConfig(listen_ip, listen_port, false) - : dev::p2p::NetworkConfig(public_ip, listen_ip, listen_port, false); + auto net_conf = public_ip.empty() + ? dev::p2p::NetworkConfig(listen_ip, listen_port, false) + : dev::p2p::NetworkConfig(public_ip, listen_ip, listen_port, false, false, public_port); net_conf.allowLocalDiscovery = !denyLocalDiscovery; dev::p2p::TaraxaNetworkConfig taraxa_net_conf; From 7e1d71d91c71999680b1b602db4c12aa19d0e813 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 12 Feb 2024 14:04:32 +0100 Subject: [PATCH 66/72] bugfix: fixed PING rpl encoding --- libraries/aleth/libp2p/NodeTable.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/aleth/libp2p/NodeTable.h b/libraries/aleth/libp2p/NodeTable.h index acfae2a482..23ae85d10e 100644 --- a/libraries/aleth/libp2p/NodeTable.h +++ b/libraries/aleth/libp2p/NodeTable.h @@ -474,7 +474,7 @@ struct PingNode : DiscoveryDatagram { std::optional use_udp_port; void streamRLP(RLPStream& _s) const override { - _s.appendList(use_udp_port.has_value() ? 6 : 5); + _s.appendList(use_udp_port.has_value() ? 
7 : 6); _s << dev::p2p::c_protocolVersion; _s << chain_id; source.streamRLP(_s); @@ -492,7 +492,7 @@ struct PingNode : DiscoveryDatagram { destination.interpretRLP(r[3]); expiration = r[4].toInt(); seq = r[5].toInt(); - if (r.itemCount() > 6 && r[6].isInt()) use_udp_port = r[6].toInt(); + if (r.itemCount() > 6 && r[6].isInt()) use_udp_port = r[6].toInt(); } std::string typeName() const override { return "Ping"; } From 0665ea154fb7e407a6ea58e231aa005b9c2f3d17 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 12 Feb 2024 16:05:22 +0100 Subject: [PATCH 67/72] chore: remove seq from ping --- libraries/aleth/libp2p/NodeTable.cpp | 1 - libraries/aleth/libp2p/NodeTable.h | 7 ++----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/libraries/aleth/libp2p/NodeTable.cpp b/libraries/aleth/libp2p/NodeTable.cpp index dfb8f3aac4..b9cab740f8 100644 --- a/libraries/aleth/libp2p/NodeTable.cpp +++ b/libraries/aleth/libp2p/NodeTable.cpp @@ -261,7 +261,6 @@ void NodeTable::ping(Node const& _node, shared_ptr _replacementNodeEn PingNode p{m_hostNodeEndpoint, _node.get_endpoint(), chain_id_}; p.expiration = nextRequestExpirationTime(); - p.seq = m_hostENR.sequenceNumber(); if (announce_upd_port_) p.use_udp_port = true; auto const pingHash = p.sign(m_secret); LOG(m_logger) << p.typeName() << " to " << _node; diff --git a/libraries/aleth/libp2p/NodeTable.h b/libraries/aleth/libp2p/NodeTable.h index 23ae85d10e..f84f2b3e8e 100644 --- a/libraries/aleth/libp2p/NodeTable.h +++ b/libraries/aleth/libp2p/NodeTable.h @@ -470,17 +470,15 @@ struct PingNode : DiscoveryDatagram { unsigned chain_id = 0; NodeIPEndpoint source; NodeIPEndpoint destination; - uint64_t seq; std::optional use_udp_port; void streamRLP(RLPStream& _s) const override { - _s.appendList(use_udp_port.has_value() ? 7 : 6); + _s.appendList(use_udp_port.has_value() ? 
6 : 5); _s << dev::p2p::c_protocolVersion; _s << chain_id; source.streamRLP(_s); destination.streamRLP(_s); _s << *expiration; - _s << seq; if (use_udp_port.has_value()) _s << *use_udp_port; } @@ -491,8 +489,7 @@ struct PingNode : DiscoveryDatagram { source.interpretRLP(r[2]); destination.interpretRLP(r[3]); expiration = r[4].toInt(); - seq = r[5].toInt(); - if (r.itemCount() > 6 && r[6].isInt()) use_udp_port = r[6].toInt(); + if (r.itemCount() > 5 && r[5].isInt()) use_udp_port = r[5].toInt(); } std::string typeName() const override { return "Ping"; } From 22780490972a94d32a7da85cd769349c0389be09 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 13 Feb 2024 14:39:04 +0100 Subject: [PATCH 68/72] feat: enable this flag to support specific port --- docker-entrypoint.sh | 4 ++-- libraries/aleth/libp2p/Host.cpp | 2 +- libraries/aleth/libp2p/Network.h | 10 +++++----- libraries/aleth/libp2p/NodeTable.cpp | 21 ++++++++++++--------- libraries/aleth/libp2p/NodeTable.h | 13 +++++++------ libraries/cli/src/config.cpp | 8 +++++--- libraries/config/include/config/network.hpp | 2 +- libraries/core_libs/network/src/network.cpp | 2 +- programs/taraxa-bootnode/main.cpp | 19 +++++++++++-------- 9 files changed, 45 insertions(+), 36 deletions(-) diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh index f7ac1e62ab..383a98244e 100755 --- a/docker-entrypoint.sh +++ b/docker-entrypoint.sh @@ -19,7 +19,7 @@ else if [ "$ADVERTISED_IP" = "auto" ]; then ADVERTISED_IP=$(curl icanhazip.com 2>/dev/null) fi - FLAGS="--public-ip ${ADVERTISED_IP} --public-port" + FLAGS="--public-ip ${ADVERTISED_IP}" fi ADVERTISED_PORT_NAME="ADVERTISED_PORT_$INDEX" @@ -27,7 +27,7 @@ else if [[ -z "${ADVERTISED_PORT}" ]]; then echo "ADVERTISED_PORT is not set." 
else - FLAGS="$FLAGS --port ${ADVERTISED_PORT}" + FLAGS="$FLAGS --public-port ${ADVERTISED_PORT}" fi fi diff --git a/libraries/aleth/libp2p/Host.cpp b/libraries/aleth/libp2p/Host.cpp index 2067e0f940..a825d9f3c4 100644 --- a/libraries/aleth/libp2p/Host.cpp +++ b/libraries/aleth/libp2p/Host.cpp @@ -83,7 +83,7 @@ Host::Host(std::string _clientVersion, KeyPair const& kp, NetworkConfig _n, Tara m_nodeTable = make_unique( ioc_, m_alias, NodeIPEndpoint(bi::make_address(listenAddress()), listenPort(), listenPort()), updateENR(enr, m_tcpPublic, listenPort()), m_netConfig.discovery, m_netConfig.allowLocalDiscovery, - taraxa_conf_.is_boot_node, m_netConfig.announcePublicPort, taraxa_conf_.chain_id); + taraxa_conf_.is_boot_node, m_netConfig.publicPort, taraxa_conf_.chain_id); m_nodeTable->setEventHandler(new NodeTableEventHandler([this](auto const&... args) { onNodeTableEvent(args...); })); if (restored_state) { for (auto const& node : restored_state->known_nodes) { diff --git a/libraries/aleth/libp2p/Network.h b/libraries/aleth/libp2p/Network.h index 5011a9caa3..657478005b 100644 --- a/libraries/aleth/libp2p/Network.h +++ b/libraries/aleth/libp2p/Network.h @@ -37,14 +37,14 @@ struct NetworkConfig { // Network Preferences with intended Public IP NetworkConfig(std::string const& _publicIP, std::string const& _listenAddress = std::string(), - unsigned short _listenPort = c_defaultListenPort, bool _upnp = true, bool _allowLocalDiscovery = false, - bool publicPort = false) + uint16_t _listenPort = c_defaultListenPort, bool _upnp = true, bool _allowLocalDiscovery = false, + uint16_t publicPort = 0) : publicIPAddress(_publicIP), listenIPAddress(_listenAddress), listenPort(_listenPort), traverseNAT(_upnp), allowLocalDiscovery(_allowLocalDiscovery), - announcePublicPort(publicPort) { + publicPort(publicPort) { if (!publicIPAddress.empty() && !isPublicAddress(publicIPAddress)) BOOST_THROW_EXCEPTION(InvalidPublicIPAddress()); } @@ -52,7 +52,8 @@ struct NetworkConfig { std::string 
publicIPAddress; std::string listenIPAddress; - unsigned short listenPort = c_defaultListenPort; + uint16_t listenPort = c_defaultListenPort; + uint16_t publicPort = 0; // Announce public port in PING msg /// Preferences @@ -60,7 +61,6 @@ struct NetworkConfig { bool discovery = true; // Discovery is activated with network. bool allowLocalDiscovery = false; // Include nodes with local IP addresses in the discovery process. bool pin = false; // Only accept or connect to trusted peers. - bool announcePublicPort = false; // Announce public port in PING msg }; /** diff --git a/libraries/aleth/libp2p/NodeTable.cpp b/libraries/aleth/libp2p/NodeTable.cpp index b9cab740f8..59d898402d 100644 --- a/libraries/aleth/libp2p/NodeTable.cpp +++ b/libraries/aleth/libp2p/NodeTable.cpp @@ -3,6 +3,8 @@ // Licensed under the GNU General Public License, Version 3. #include "NodeTable.h" + +#include using namespace std; namespace dev { @@ -33,8 +35,8 @@ inline bool operator==(weak_ptr const& _weak, shared_ptr c } NodeTable::NodeTable(ba::io_context& _io, KeyPair const& _alias, NodeIPEndpoint const& _endpoint, ENR const& _enr, - bool _enabled, bool _allowLocalDiscovery, bool is_boot_node, bool announce_udp_port, - unsigned chain_id) + bool _enabled, bool _allowLocalDiscovery, bool is_boot_node, uint16_t public_port, + uint32_t chain_id) : strand_(ba::make_strand(_io)), m_hostNodeID{_alias.pub()}, m_hostNodeIDHash{sha3(m_hostNodeID)}, @@ -50,7 +52,7 @@ NodeTable::NodeTable(ba::io_context& _io, KeyPair const& _alias, NodeIPEndpoint m_endpointTrackingTimer{make_shared(_io)}, is_boot_node_(is_boot_node), chain_id_(chain_id), - announce_upd_port_(announce_udp_port) { + public_port_(public_port) { if (is_boot_node_) { s_bucketSize = BOOT_NODE_BUCKET_SIZE; } @@ -261,7 +263,7 @@ void NodeTable::ping(Node const& _node, shared_ptr _replacementNodeEn PingNode p{m_hostNodeEndpoint, _node.get_endpoint(), chain_id_}; p.expiration = nextRequestExpirationTime(); - if (announce_upd_port_) p.use_udp_port = 
true; + if (public_port_) p.public_port = public_port_; auto const pingHash = p.sign(m_secret); LOG(m_logger) << p.typeName() << " to " << _node; m_socket->send(p); @@ -555,6 +557,10 @@ std::shared_ptr NodeTable::handleFindNode(bi::udp::endpoint const& _f } NodeIPEndpoint NodeTable::getSourceEndpoint(bi::udp::endpoint const& from, PingNode const& packet) { + auto port = from.port(); + if (packet.public_port && *packet.public_port != 0) { + port = *packet.public_port; + } if (from.address() != packet.source.address() && !isLocalHostAddress(packet.source.address())) { if (isPrivateAddress(from.address()) && !isPrivateAddress(packet.source.address())) { Guard l(x_ips); @@ -566,14 +572,11 @@ NodeIPEndpoint NodeTable::getSourceEndpoint(bi::udp::endpoint const& from, PingN } else { m_id2IpMap[packet.sourceid] = from; } - m_ipMappings[from] = {packet.source.address(), packet.source.udpPort(), packet.source.tcpPort()}; + m_ipMappings[from] = {packet.source.address(), port, packet.source.tcpPort()}; return m_ipMappings[from]; } } - if (packet.use_udp_port.has_value() && packet.use_udp_port) { - return {from.address(), packet.source.udpPort(), packet.source.tcpPort()}; - } - return {from.address(), from.port(), packet.source.tcpPort()}; + return {from.address(), port, packet.source.tcpPort()}; } std::shared_ptr NodeTable::handlePingNode(bi::udp::endpoint const& _from, DiscoveryDatagram const& _packet) { diff --git a/libraries/aleth/libp2p/NodeTable.h b/libraries/aleth/libp2p/NodeTable.h index f84f2b3e8e..6e1f20ddf8 100644 --- a/libraries/aleth/libp2p/NodeTable.h +++ b/libraries/aleth/libp2p/NodeTable.h @@ -8,6 +8,7 @@ #include #include +#include #include "Common.h" #include "ENR.h" @@ -110,7 +111,7 @@ class NodeTable : UDPSocketEvents { /// listen on and host ENR. 
NodeTable(ba::io_context& _io, KeyPair const& _alias, NodeIPEndpoint const& _endpoint, ENR const& _enr, bool _enabled = true, bool _allowLocalDiscovery = false, bool is_boot_node = false, - bool announce_udp_port = false, unsigned chain_id = 0); + uint16_t public_port = 0, uint32_t chain_id = 0); ~NodeTable() { if (m_socket->isOpen()) { @@ -385,7 +386,7 @@ class NodeTable : UDPSocketEvents { const bool is_boot_node_ = false; const uint32_t chain_id_ = 0; - const bool announce_upd_port_ = false; + const uint16_t public_port_ = 0; }; /** @@ -470,16 +471,16 @@ struct PingNode : DiscoveryDatagram { unsigned chain_id = 0; NodeIPEndpoint source; NodeIPEndpoint destination; - std::optional use_udp_port; + std::optional public_port; void streamRLP(RLPStream& _s) const override { - _s.appendList(use_udp_port.has_value() ? 6 : 5); + _s.appendList(public_port.has_value() ? 6 : 5); _s << dev::p2p::c_protocolVersion; _s << chain_id; source.streamRLP(_s); destination.streamRLP(_s); _s << *expiration; - if (use_udp_port.has_value()) _s << *use_udp_port; + if (public_port.has_value()) _s << *public_port; } void interpretRLP(bytesConstRef _bytes) override { @@ -489,7 +490,7 @@ struct PingNode : DiscoveryDatagram { source.interpretRLP(r[2]); destination.interpretRLP(r[3]); expiration = r[4].toInt(); - if (r.itemCount() > 5 && r[5].isInt()) use_udp_port = r[5].toInt(); + if (r.itemCount() > 5 && r[5].isInt()) public_port = r[5].toInt(); } std::string typeName() const override { return "Ping"; } diff --git a/libraries/cli/src/config.cpp b/libraries/cli/src/config.cpp index ab8c32151a..201a83ae30 100644 --- a/libraries/cli/src/config.cpp +++ b/libraries/cli/src/config.cpp @@ -26,7 +26,7 @@ Config::Config(int argc, const char* argv[]) { std::vector command; std::vector boot_nodes; std::string public_ip; - bool public_port = false; + uint16_t public_port = 0; uint16_t port = 0; std::vector log_channels; std::vector log_configurations; @@ -109,8 +109,8 @@ Config::Config(int argc, const 
char* argv[]) { "Boot nodes to connect to in addition to boot nodes defined in config: [ip_address:port_number/node_id, ....]"); node_command_options.add_options()(PUBLIC_IP, bpo::value(&public_ip), "Force advertised public IP to the given IP (default: auto)"); - node_command_options.add_options()(PUBLIC_PORT, bpo::bool_switch(&public_port), - "Force advertised public port (default: false)"); + node_command_options.add_options()(PUBLIC_PORT, bpo::value(&public_port), + "Force advertised public port (default: disabled)"); node_command_options.add_options()(PORT, bpo::value(&port), "Listen on the given port for incoming connections"); node_command_options.add_options()(LOG_CHANNELS, bpo::value>(&log_channels)->multitoken(), @@ -267,6 +267,8 @@ Config::Config(int argc, const char* argv[]) { } if (!public_ip.empty()) { node_config_.network.public_ip = public_ip; + } + if (public_port) { node_config_.network.public_port = public_port; } if (port) { diff --git a/libraries/config/include/config/network.hpp b/libraries/config/include/config/network.hpp index a863a86797..181e59c6ac 100644 --- a/libraries/config/include/config/network.hpp +++ b/libraries/config/include/config/network.hpp @@ -61,7 +61,7 @@ struct NetworkConfig { std::string json_file_name; std::string public_ip; - bool public_port = false; + uint16_t public_port = 0; std::string listen_ip = "127.0.0.1"; uint16_t listen_port = 0; std::vector boot_nodes; diff --git a/libraries/core_libs/network/src/network.cpp b/libraries/core_libs/network/src/network.cpp index f750e052f8..6666575aa3 100644 --- a/libraries/core_libs/network/src/network.cpp +++ b/libraries/core_libs/network/src/network.cpp @@ -55,7 +55,7 @@ Network::Network(const FullNodeConfig &config, const h256 &genesis_hash, std::fi net_conf.allowLocalDiscovery = true; net_conf.traverseNAT = false; net_conf.publicIPAddress = config.network.public_ip; - net_conf.announcePublicPort = config.network.public_port; + net_conf.publicPort = 
config.network.public_port; net_conf.pin = false; dev::p2p::TaraxaNetworkConfig taraxa_net_conf; diff --git a/programs/taraxa-bootnode/main.cpp b/programs/taraxa-bootnode/main.cpp index 7b81b3535a..5e37f0418b 100644 --- a/programs/taraxa-bootnode/main.cpp +++ b/programs/taraxa-bootnode/main.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -86,7 +87,6 @@ dev::KeyPair getKey(std::string const& path) { int main(int argc, char** argv) { bool denyLocalDiscovery; - bool public_port; std::string wallet; po::options_description general_options("GENERAL OPTIONS", kLineWidth); @@ -101,14 +101,15 @@ int main(int argc, char** argv) { auto addNetworkingOption = client_networking.add_options(); addNetworkingOption("public-ip", po::value()->value_name(""), "Force advertised public IP to the given IP (default: auto)"); - addNetworkingOption("public-port", po::bool_switch(&public_port), "Force advertised public port (default: false)"); + addNetworkingOption("public-port", po::value()->value_name(""), + "Force advertised public port (default: disabled)"); addNetworkingOption("listen-ip", po::value()->value_name(""), "Listen on the given IP for incoming connections (default: 0.0.0.0)"); - addNetworkingOption("listen", po::value()->value_name(""), + addNetworkingOption("listen", po::value()->value_name(""), "Listen on the given port for incoming connections (default: 10002)"); addNetworkingOption("deny-local-discovery", po::bool_switch(&denyLocalDiscovery), "Reject local addresses in the discovery process. Used for testing purposes."); - addNetworkingOption("chain-id", po::value()->value_name(""), + addNetworkingOption("chain-id", po::value()->value_name(""), "Connect to default mainet/testnet/devnet bootnodes"); addNetworkingOption("number-of-threads", po::value()->value_name("<#>"), "Define number of threads for this bootnode (default: 1)"); @@ -142,17 +143,19 @@ int main(int argc, char** argv) { } /// Networking params. 
- unsigned short chain_id = static_cast(taraxa::cli::Config::DEFAULT_CHAIN_ID); - if (vm.count("chain-id")) chain_id = vm["chain-id"].as(); + uint32_t chain_id = static_cast(taraxa::cli::Config::DEFAULT_CHAIN_ID); + if (vm.count("chain-id")) chain_id = vm["chain-id"].as(); std::string listen_ip = "0.0.0.0"; - unsigned short listen_port = 10002; + uint16_t listen_port = 10002; + uint16_t public_port = 0; std::string public_ip; uint32_t num_of_threads = 1; if (vm.count("public-ip")) public_ip = vm["public-ip"].as(); if (vm.count("listen-ip")) listen_ip = vm["listen-ip"].as(); - if (vm.count("listen")) listen_port = vm["listen"].as(); + if (vm.count("listen")) listen_port = vm["listen"].as(); + if (vm.count("public-port")) listen_port = vm["public-port"].as(); if (vm.count("number-of-threads")) num_of_threads = vm["number-of-threads"].as(); setupLogging(logging_options); From 8a8274f539242d88aec6442a8407a97ac4f44b6f Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 15 Feb 2024 13:01:35 +0100 Subject: [PATCH 69/72] feat: use external upd only on finding Neighbour --- libraries/aleth/libp2p/Common.h | 1 + libraries/aleth/libp2p/NodeTable.cpp | 15 ++++++++------- libraries/aleth/libp2p/NodeTable.h | 25 ++++++++++++++++--------- 3 files changed, 25 insertions(+), 16 deletions(-) diff --git a/libraries/aleth/libp2p/Common.h b/libraries/aleth/libp2p/Common.h index c2bf3dcdab..fa15a5ef45 100644 --- a/libraries/aleth/libp2p/Common.h +++ b/libraries/aleth/libp2p/Common.h @@ -226,6 +226,7 @@ class Node { public: // TODO: p2p implement std::atomic peerType{PeerType::Optional}; + std::optional external_udp_port; }; inline boost::log::formatting_ostream& operator<<(boost::log::formatting_ostream& _strm, Node const& _node) { diff --git a/libraries/aleth/libp2p/NodeTable.cpp b/libraries/aleth/libp2p/NodeTable.cpp index 59d898402d..9410cd0b12 100644 --- a/libraries/aleth/libp2p/NodeTable.cpp +++ b/libraries/aleth/libp2p/NodeTable.cpp @@ -263,7 +263,7 @@ void NodeTable::ping(Node 
const& _node, shared_ptr _replacementNodeEn PingNode p{m_hostNodeEndpoint, _node.get_endpoint(), chain_id_}; p.expiration = nextRequestExpirationTime(); - if (public_port_) p.public_port = public_port_; + p.seq = m_hostENR.sequenceNumber(); auto const pingHash = p.sign(m_secret); LOG(m_logger) << p.typeName() << " to " << _node; m_socket->send(p); @@ -464,6 +464,10 @@ shared_ptr NodeTable::handlePong(bi::udp::endpoint const& _from, Disc } } + if (pong.public_port && *pong.public_port != 0) { + sourceNodeEntry->node.external_udp_port = *pong.public_port; + } + m_sentPings.erase(_from); // update our external endpoint address and UDP port @@ -557,10 +561,6 @@ std::shared_ptr NodeTable::handleFindNode(bi::udp::endpoint const& _f } NodeIPEndpoint NodeTable::getSourceEndpoint(bi::udp::endpoint const& from, PingNode const& packet) { - auto port = from.port(); - if (packet.public_port && *packet.public_port != 0) { - port = *packet.public_port; - } if (from.address() != packet.source.address() && !isLocalHostAddress(packet.source.address())) { if (isPrivateAddress(from.address()) && !isPrivateAddress(packet.source.address())) { Guard l(x_ips); @@ -572,11 +572,11 @@ NodeIPEndpoint NodeTable::getSourceEndpoint(bi::udp::endpoint const& from, PingN } else { m_id2IpMap[packet.sourceid] = from; } - m_ipMappings[from] = {packet.source.address(), port, packet.source.tcpPort()}; + m_ipMappings[from] = {packet.source.address(), packet.source.udpPort(), packet.source.tcpPort()}; return m_ipMappings[from]; } } - return {from.address(), port, packet.source.tcpPort()}; + return {from.address(), from.port(), packet.source.tcpPort()}; } std::shared_ptr NodeTable::handlePingNode(bi::udp::endpoint const& _from, DiscoveryDatagram const& _packet) { @@ -605,6 +605,7 @@ std::shared_ptr NodeTable::handlePingNode(bi::udp::endpoint const& _f p.expiration = nextRequestExpirationTime(); p.echo = in.echo; p.seq = m_hostENR.sequenceNumber(); + if (public_port_) p.public_port = public_port_; 
p.sign(m_secret); m_socket->send(p); diff --git a/libraries/aleth/libp2p/NodeTable.h b/libraries/aleth/libp2p/NodeTable.h index 6e1f20ddf8..d78075e615 100644 --- a/libraries/aleth/libp2p/NodeTable.h +++ b/libraries/aleth/libp2p/NodeTable.h @@ -471,16 +471,16 @@ struct PingNode : DiscoveryDatagram { unsigned chain_id = 0; NodeIPEndpoint source; NodeIPEndpoint destination; - std::optional public_port; + std::optional seq; void streamRLP(RLPStream& _s) const override { - _s.appendList(public_port.has_value() ? 6 : 5); + _s.appendList(seq.has_value() ? 6 : 5); _s << dev::p2p::c_protocolVersion; _s << chain_id; source.streamRLP(_s); destination.streamRLP(_s); _s << *expiration; - if (public_port.has_value()) _s << *public_port; + if (seq.has_value()) _s << *seq; } void interpretRLP(bytesConstRef _bytes) override { @@ -490,7 +490,7 @@ struct PingNode : DiscoveryDatagram { source.interpretRLP(r[2]); destination.interpretRLP(r[3]); expiration = r[4].toInt(); - if (r.itemCount() > 5 && r[5].isInt()) public_port = r[5].toInt(); + if (r.itemCount() > 5 && r[5].isInt()) seq = r[5].toInt(); } std::string typeName() const override { return "Ping"; } @@ -508,21 +508,24 @@ struct Pong : DiscoveryDatagram { uint8_t packetType() const override { return type; } NodeIPEndpoint destination; - boost::optional seq; + uint64_t seq; + std::optional public_port; void streamRLP(RLPStream& _s) const override { - _s.appendList(seq.is_initialized() ? 4 : 3); + _s.appendList(public_port.has_value() ? 
5 : 4); destination.streamRLP(_s); _s << echo; _s << *expiration; - if (seq.is_initialized()) _s << *seq; + _s << seq; + if (public_port.has_value()) _s << *public_port; } void interpretRLP(bytesConstRef _bytes) override { RLP r(_bytes, RLP::AllowNonCanon | RLP::ThrowOnFail); destination.interpretRLP(r[0]); echo = (h256)r[1]; expiration = r[2].toInt(); - if (r.itemCount() > 3 && r[3].isInt()) seq = r[3].toInt(); + seq = r[3].toInt(); + if (r.itemCount() > 4 && r[4].isInt()) public_port = r[4].toInt(); } std::string typeName() const override { return "Pong"; } @@ -579,7 +582,11 @@ struct Neighbours : DiscoveryDatagram { : DiscoveryDatagram(_from, _fromid, _echo) {} struct Neighbour { - Neighbour(Node const& _node) : endpoint(_node.get_endpoint()), node(_node.id) {} + Neighbour(Node const& _node) : endpoint(_node.get_endpoint()), node(_node.id) { + if (_node.external_udp_port && *_node.external_udp_port != 0) { + endpoint.setUdpPort(*_node.external_udp_port); + } + } Neighbour(RLP const& _r) : endpoint(_r) { node = h512(_r[3].toBytes()); } NodeIPEndpoint endpoint; NodeID node; From 1c6a2659ea8869a1bbe58bc3929ec5924c9ae5dd Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 15 Feb 2024 13:24:35 +0100 Subject: [PATCH 70/72] some more fixes --- docker-entrypoint.sh | 2 +- libraries/aleth/libp2p/NodeTable.cpp | 5 +---- libraries/aleth/libp2p/NodeTable.h | 12 +++++------- 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh index 383a98244e..1fc521d3e9 100755 --- a/docker-entrypoint.sh +++ b/docker-entrypoint.sh @@ -27,7 +27,7 @@ else if [[ -z "${ADVERTISED_PORT}" ]]; then echo "ADVERTISED_PORT is not set." 
else - FLAGS="$FLAGS --public-port ${ADVERTISED_PORT}" + FLAGS="$FLAGS --port ${ADVERTISED_PORT}" fi fi diff --git a/libraries/aleth/libp2p/NodeTable.cpp b/libraries/aleth/libp2p/NodeTable.cpp index 9410cd0b12..8f0cb9d080 100644 --- a/libraries/aleth/libp2p/NodeTable.cpp +++ b/libraries/aleth/libp2p/NodeTable.cpp @@ -464,9 +464,7 @@ shared_ptr NodeTable::handlePong(bi::udp::endpoint const& _from, Disc } } - if (pong.public_port && *pong.public_port != 0) { - sourceNodeEntry->node.external_udp_port = *pong.public_port; - } + sourceNodeEntry->node.external_udp_port = nodeValidation.tcpPort; m_sentPings.erase(_from); @@ -605,7 +603,6 @@ std::shared_ptr NodeTable::handlePingNode(bi::udp::endpoint const& _f p.expiration = nextRequestExpirationTime(); p.echo = in.echo; p.seq = m_hostENR.sequenceNumber(); - if (public_port_) p.public_port = public_port_; p.sign(m_secret); m_socket->send(p); diff --git a/libraries/aleth/libp2p/NodeTable.h b/libraries/aleth/libp2p/NodeTable.h index d78075e615..bf13aa8b18 100644 --- a/libraries/aleth/libp2p/NodeTable.h +++ b/libraries/aleth/libp2p/NodeTable.h @@ -508,24 +508,22 @@ struct Pong : DiscoveryDatagram { uint8_t packetType() const override { return type; } NodeIPEndpoint destination; - uint64_t seq; - std::optional public_port; + std::optional seq; void streamRLP(RLPStream& _s) const override { - _s.appendList(public_port.has_value() ? 5 : 4); + _s.appendList(seq.has_value() ? 
4 : 3); destination.streamRLP(_s); _s << echo; _s << *expiration; - _s << seq; - if (public_port.has_value()) _s << *public_port; + if (seq.has_value()) _s << *seq; } + void interpretRLP(bytesConstRef _bytes) override { RLP r(_bytes, RLP::AllowNonCanon | RLP::ThrowOnFail); destination.interpretRLP(r[0]); echo = (h256)r[1]; expiration = r[2].toInt(); - seq = r[3].toInt(); - if (r.itemCount() > 4 && r[4].isInt()) public_port = r[4].toInt(); + if (r.itemCount() > 3 && r[3].isInt()) seq = r[3].toInt(); } std::string typeName() const override { return "Pong"; } From d8b72aa2de01d6836cf134a63054f65a0137494d Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 15 Feb 2024 13:48:51 +0100 Subject: [PATCH 71/72] clean up --- libraries/aleth/libp2p/Host.cpp | 2 +- libraries/aleth/libp2p/Network.h | 7 ++----- libraries/aleth/libp2p/NodeTable.cpp | 6 ++---- libraries/aleth/libp2p/NodeTable.h | 4 +--- libraries/cli/include/cli/config.hpp | 1 - libraries/cli/src/config.cpp | 6 ------ libraries/config/include/config/network.hpp | 1 - libraries/core_libs/network/src/network.cpp | 1 - programs/taraxa-bootnode/main.cpp | 9 ++------- 9 files changed, 8 insertions(+), 29 deletions(-) diff --git a/libraries/aleth/libp2p/Host.cpp b/libraries/aleth/libp2p/Host.cpp index a825d9f3c4..1e773acf73 100644 --- a/libraries/aleth/libp2p/Host.cpp +++ b/libraries/aleth/libp2p/Host.cpp @@ -83,7 +83,7 @@ Host::Host(std::string _clientVersion, KeyPair const& kp, NetworkConfig _n, Tara m_nodeTable = make_unique( ioc_, m_alias, NodeIPEndpoint(bi::make_address(listenAddress()), listenPort(), listenPort()), updateENR(enr, m_tcpPublic, listenPort()), m_netConfig.discovery, m_netConfig.allowLocalDiscovery, - taraxa_conf_.is_boot_node, m_netConfig.publicPort, taraxa_conf_.chain_id); + taraxa_conf_.is_boot_node, taraxa_conf_.chain_id); m_nodeTable->setEventHandler(new NodeTableEventHandler([this](auto const&... 
args) { onNodeTableEvent(args...); })); if (restored_state) { for (auto const& node : restored_state->known_nodes) { diff --git a/libraries/aleth/libp2p/Network.h b/libraries/aleth/libp2p/Network.h index 657478005b..aee0f7ad4a 100644 --- a/libraries/aleth/libp2p/Network.h +++ b/libraries/aleth/libp2p/Network.h @@ -37,14 +37,12 @@ struct NetworkConfig { // Network Preferences with intended Public IP NetworkConfig(std::string const& _publicIP, std::string const& _listenAddress = std::string(), - uint16_t _listenPort = c_defaultListenPort, bool _upnp = true, bool _allowLocalDiscovery = false, - uint16_t publicPort = 0) + uint16_t _listenPort = c_defaultListenPort, bool _upnp = true, bool _allowLocalDiscovery = false) : publicIPAddress(_publicIP), listenIPAddress(_listenAddress), listenPort(_listenPort), traverseNAT(_upnp), - allowLocalDiscovery(_allowLocalDiscovery), - publicPort(publicPort) { + allowLocalDiscovery(_allowLocalDiscovery) { if (!publicIPAddress.empty() && !isPublicAddress(publicIPAddress)) BOOST_THROW_EXCEPTION(InvalidPublicIPAddress()); } @@ -53,7 +51,6 @@ struct NetworkConfig { std::string publicIPAddress; std::string listenIPAddress; uint16_t listenPort = c_defaultListenPort; - uint16_t publicPort = 0; // Announce public port in PING msg /// Preferences diff --git a/libraries/aleth/libp2p/NodeTable.cpp b/libraries/aleth/libp2p/NodeTable.cpp index 8f0cb9d080..74ebd40df1 100644 --- a/libraries/aleth/libp2p/NodeTable.cpp +++ b/libraries/aleth/libp2p/NodeTable.cpp @@ -35,8 +35,7 @@ inline bool operator==(weak_ptr const& _weak, shared_ptr c } NodeTable::NodeTable(ba::io_context& _io, KeyPair const& _alias, NodeIPEndpoint const& _endpoint, ENR const& _enr, - bool _enabled, bool _allowLocalDiscovery, bool is_boot_node, uint16_t public_port, - uint32_t chain_id) + bool _enabled, bool _allowLocalDiscovery, bool is_boot_node, uint32_t chain_id) : strand_(ba::make_strand(_io)), m_hostNodeID{_alias.pub()}, m_hostNodeIDHash{sha3(m_hostNodeID)}, @@ -51,8 +50,7 @@ 
NodeTable::NodeTable(ba::io_context& _io, KeyPair const& _alias, NodeIPEndpoint m_timeoutsTimer{make_shared(_io)}, m_endpointTrackingTimer{make_shared(_io)}, is_boot_node_(is_boot_node), - chain_id_(chain_id), - public_port_(public_port) { + chain_id_(chain_id) { if (is_boot_node_) { s_bucketSize = BOOT_NODE_BUCKET_SIZE; } diff --git a/libraries/aleth/libp2p/NodeTable.h b/libraries/aleth/libp2p/NodeTable.h index bf13aa8b18..d8d9b42b39 100644 --- a/libraries/aleth/libp2p/NodeTable.h +++ b/libraries/aleth/libp2p/NodeTable.h @@ -110,8 +110,7 @@ class NodeTable : UDPSocketEvents { /// Constructor requiring host for I/O, credentials, and IP Address, port to /// listen on and host ENR. NodeTable(ba::io_context& _io, KeyPair const& _alias, NodeIPEndpoint const& _endpoint, ENR const& _enr, - bool _enabled = true, bool _allowLocalDiscovery = false, bool is_boot_node = false, - uint16_t public_port = 0, uint32_t chain_id = 0); + bool _enabled = true, bool _allowLocalDiscovery = false, bool is_boot_node = false, uint32_t chain_id = 0); ~NodeTable() { if (m_socket->isOpen()) { @@ -386,7 +385,6 @@ class NodeTable : UDPSocketEvents { const bool is_boot_node_ = false; const uint32_t chain_id_ = 0; - const uint16_t public_port_ = 0; }; /** diff --git a/libraries/cli/include/cli/config.hpp b/libraries/cli/include/cli/config.hpp index 72977cff04..41aa55e7e4 100644 --- a/libraries/cli/include/cli/config.hpp +++ b/libraries/cli/include/cli/config.hpp @@ -48,7 +48,6 @@ class Config { static constexpr const char* CONFIG_COMMAND = "config"; static constexpr const char* BOOT_NODES = "boot-nodes"; static constexpr const char* PUBLIC_IP = "public-ip"; - static constexpr const char* PUBLIC_PORT = "public-port"; static constexpr const char* PORT = "port"; static constexpr const char* LOG_CHANNELS = "log-channels"; static constexpr const char* LOG_CONFIGURATIONS = "log-configurations"; diff --git a/libraries/cli/src/config.cpp b/libraries/cli/src/config.cpp index 201a83ae30..3023f3fbbe 100644 
--- a/libraries/cli/src/config.cpp +++ b/libraries/cli/src/config.cpp @@ -26,7 +26,6 @@ Config::Config(int argc, const char* argv[]) { std::vector command; std::vector boot_nodes; std::string public_ip; - uint16_t public_port = 0; uint16_t port = 0; std::vector log_channels; std::vector log_configurations; @@ -109,8 +108,6 @@ Config::Config(int argc, const char* argv[]) { "Boot nodes to connect to in addition to boot nodes defined in config: [ip_address:port_number/node_id, ....]"); node_command_options.add_options()(PUBLIC_IP, bpo::value(&public_ip), "Force advertised public IP to the given IP (default: auto)"); - node_command_options.add_options()(PUBLIC_PORT, bpo::value(&public_port), - "Force advertised public port (default: disabled)"); node_command_options.add_options()(PORT, bpo::value(&port), "Listen on the given port for incoming connections"); node_command_options.add_options()(LOG_CHANNELS, bpo::value>(&log_channels)->multitoken(), @@ -268,9 +265,6 @@ Config::Config(int argc, const char* argv[]) { if (!public_ip.empty()) { node_config_.network.public_ip = public_ip; } - if (public_port) { - node_config_.network.public_port = public_port; - } if (port) { node_config_.network.listen_port = port; } diff --git a/libraries/config/include/config/network.hpp b/libraries/config/include/config/network.hpp index 181e59c6ac..49253ca962 100644 --- a/libraries/config/include/config/network.hpp +++ b/libraries/config/include/config/network.hpp @@ -61,7 +61,6 @@ struct NetworkConfig { std::string json_file_name; std::string public_ip; - uint16_t public_port = 0; std::string listen_ip = "127.0.0.1"; uint16_t listen_port = 0; std::vector boot_nodes; diff --git a/libraries/core_libs/network/src/network.cpp b/libraries/core_libs/network/src/network.cpp index 6666575aa3..b5ecbed955 100644 --- a/libraries/core_libs/network/src/network.cpp +++ b/libraries/core_libs/network/src/network.cpp @@ -55,7 +55,6 @@ Network::Network(const FullNodeConfig &config, const h256 
&genesis_hash, std::fi net_conf.allowLocalDiscovery = true; net_conf.traverseNAT = false; net_conf.publicIPAddress = config.network.public_ip; - net_conf.publicPort = config.network.public_port; net_conf.pin = false; dev::p2p::TaraxaNetworkConfig taraxa_net_conf; diff --git a/programs/taraxa-bootnode/main.cpp b/programs/taraxa-bootnode/main.cpp index 5e37f0418b..7d5b98684c 100644 --- a/programs/taraxa-bootnode/main.cpp +++ b/programs/taraxa-bootnode/main.cpp @@ -101,8 +101,6 @@ int main(int argc, char** argv) { auto addNetworkingOption = client_networking.add_options(); addNetworkingOption("public-ip", po::value()->value_name(""), "Force advertised public IP to the given IP (default: auto)"); - addNetworkingOption("public-port", po::value()->value_name(""), - "Force advertised public port (default: disabled)"); addNetworkingOption("listen-ip", po::value()->value_name(""), "Listen on the given IP for incoming connections (default: 0.0.0.0)"); addNetworkingOption("listen", po::value()->value_name(""), @@ -148,14 +146,12 @@ int main(int argc, char** argv) { std::string listen_ip = "0.0.0.0"; uint16_t listen_port = 10002; - uint16_t public_port = 0; std::string public_ip; uint32_t num_of_threads = 1; if (vm.count("public-ip")) public_ip = vm["public-ip"].as(); if (vm.count("listen-ip")) listen_ip = vm["listen-ip"].as(); if (vm.count("listen")) listen_port = vm["listen"].as(); - if (vm.count("public-port")) listen_port = vm["public-port"].as(); if (vm.count("number-of-threads")) num_of_threads = vm["number-of-threads"].as(); setupLogging(logging_options); @@ -172,9 +168,8 @@ int main(int argc, char** argv) { } } - auto net_conf = public_ip.empty() - ? dev::p2p::NetworkConfig(listen_ip, listen_port, false) - : dev::p2p::NetworkConfig(public_ip, listen_ip, listen_port, false, false, public_port); + auto net_conf = public_ip.empty() ? 
dev::p2p::NetworkConfig(listen_ip, listen_port, false) + : dev::p2p::NetworkConfig(public_ip, listen_ip, listen_port, false, false); net_conf.allowLocalDiscovery = !denyLocalDiscovery; dev::p2p::TaraxaNetworkConfig taraxa_net_conf; From 18304d406b85693c8c8ca508d3ca416aa22b5ea3 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 16 Feb 2024 10:08:05 +0100 Subject: [PATCH 72/72] chore: make more clean up code --- libraries/aleth/libp2p/Common.h | 2 +- libraries/aleth/libp2p/NodeTable.cpp | 20 +++++++++++++------- libraries/aleth/libp2p/NodeTable.h | 12 ++++++++---- 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/libraries/aleth/libp2p/Common.h b/libraries/aleth/libp2p/Common.h index fa15a5ef45..43548c338a 100644 --- a/libraries/aleth/libp2p/Common.h +++ b/libraries/aleth/libp2p/Common.h @@ -226,7 +226,7 @@ class Node { public: // TODO: p2p implement std::atomic peerType{PeerType::Optional}; - std::optional external_udp_port; + std::atomic external_udp_port; }; inline boost::log::formatting_ostream& operator<<(boost::log::formatting_ostream& _strm, Node const& _node) { diff --git a/libraries/aleth/libp2p/NodeTable.cpp b/libraries/aleth/libp2p/NodeTable.cpp index 74ebd40df1..49340f28a9 100644 --- a/libraries/aleth/libp2p/NodeTable.cpp +++ b/libraries/aleth/libp2p/NodeTable.cpp @@ -266,8 +266,9 @@ void NodeTable::ping(Node const& _node, shared_ptr _replacementNodeEn LOG(m_logger) << p.typeName() << " to " << _node; m_socket->send(p); - NodeValidation const validation{_node.id, _node.get_endpoint().tcpPort(), chrono::steady_clock::now(), pingHash, - _replacementNodeEntry}; + NodeValidation const validation{ + _node.id, _node.get_endpoint().tcpPort(), _node.get_endpoint().udpPort(), chrono::steady_clock::now(), + pingHash, _replacementNodeEntry}; m_sentPings.insert({_node.get_endpoint(), validation}); } @@ -449,11 +450,14 @@ shared_ptr NodeTable::handlePong(bi::udp::endpoint const& _from, Disc shared_ptr sourceNodeEntry; DEV_GUARDED(x_nodes) { auto it = 
m_allNodes.find(sourceId); - if (it == m_allNodes.end()) + if (it == m_allNodes.end()) { sourceNodeEntry = make_shared(m_hostNodeIDHash, sourceId, NodeIPEndpoint{_from.address(), _from.port(), nodeValidation.tcpPort}, RLPXDatagramFace::secondsSinceEpoch(), 0 /* lastPongSentTime */); - else { + + // We need to set up the external port, as we were able to do the ping-pong exchange and the node is active + sourceNodeEntry->node.external_udp_port = nodeValidation.udpPort; + } else { sourceNodeEntry = it->second; sourceNodeEntry->lastPongReceivedTime = RLPXDatagramFace::secondsSinceEpoch(); @@ -462,8 +466,6 @@ shared_ptr NodeTable::handlePong(bi::udp::endpoint const& _from, Disc } } - sourceNodeEntry->node.external_udp_port = nodeValidation.tcpPort; - m_sentPings.erase(_from); // update our external endpoint address and UDP port @@ -610,7 +612,11 @@ std::shared_ptr NodeTable::handlePingNode(bi::udp::endpoint const& _f // that shouldn't be a big problem, at worst it can lead to more Ping-Pongs // than needed. 
std::shared_ptr sourceNodeEntry = nodeEntry(_packet.sourceid); - if (sourceNodeEntry) sourceNodeEntry->lastPongSentTime = RLPXDatagramFace::secondsSinceEpoch(); + if (sourceNodeEntry) { + sourceNodeEntry->lastPongSentTime = RLPXDatagramFace::secondsSinceEpoch(); + // We should update the endpoint to the one that the node is reporting + sourceNodeEntry->node.external_udp_port = in.source.udpPort(); + } return sourceNodeEntry; } diff --git a/libraries/aleth/libp2p/NodeTable.h b/libraries/aleth/libp2p/NodeTable.h index d8d9b42b39..fb61806d7d 100644 --- a/libraries/aleth/libp2p/NodeTable.h +++ b/libraries/aleth/libp2p/NodeTable.h @@ -199,6 +199,7 @@ class NodeTable : UDPSocketEvents { // endpoint proof (answers with Pong), then it will be added to the bucket // of the node table uint16_t tcpPort = 0; + uint16_t udpPort = 0; // Time we sent Ping - used to handle timeouts TimePoint pingSentTime; // Hash of the sent Ping packet - used to validate received Pong @@ -207,10 +208,11 @@ class NodeTable : UDPSocketEvents { // if original pinged node doesn't answer after timeout std::shared_ptr replacementNodeEntry; - NodeValidation(NodeID const& _nodeID, uint16_t _tcpPort, TimePoint const& _pingSentTime, h256 const& _pingHash, - std::shared_ptr _replacementNodeEntry) + NodeValidation(NodeID const& _nodeID, uint16_t _tcpPort, uint16_t _udpPort, TimePoint const& _pingSentTime, + h256 const& _pingHash, std::shared_ptr _replacementNodeEntry) : nodeID{_nodeID}, tcpPort{_tcpPort}, + udpPort{_udpPort}, pingSentTime{_pingSentTime}, pingHash{_pingHash}, replacementNodeEntry{std::move(_replacementNodeEntry)} {} @@ -579,8 +581,10 @@ struct Neighbours : DiscoveryDatagram { struct Neighbour { Neighbour(Node const& _node) : endpoint(_node.get_endpoint()), node(_node.id) { - if (_node.external_udp_port && *_node.external_udp_port != 0) { - endpoint.setUdpPort(*_node.external_udp_port); + // For an external node we need to replace the udp port with the reported one, as we can communicate on a udp port that's not 
available + // to everyone + if (_node.external_udp_port != 0) { + endpoint.setUdpPort(_node.external_udp_port); } } Neighbour(RLP const& _r) : endpoint(_r) { node = h512(_r[3].toBytes()); }