diff --git a/CMakeLists.txt b/CMakeLists.txt index 6bb171e4e3..1209bfbfda 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,12 +2,12 @@ cmake_minimum_required(VERSION 3.20) # Set current version of the project set(TARAXA_MAJOR_VERSION 1) -set(TARAXA_MINOR_VERSION 11) -set(TARAXA_PATCH_VERSION 4) +set(TARAXA_MINOR_VERSION 12) +set(TARAXA_PATCH_VERSION 1) set(TARAXA_VERSION ${TARAXA_MAJOR_VERSION}.${TARAXA_MINOR_VERSION}.${TARAXA_PATCH_VERSION}) # Any time a change in the network protocol is introduced this version should be increased -set(TARAXA_NET_VERSION 3) +set(TARAXA_NET_VERSION 4) # Major version is modified when DAG blocks, pbft blocks and any basic building blocks of our blockchain is modified # in the db set(TARAXA_DB_MAJOR_VERSION 1) @@ -32,7 +32,8 @@ add_compile_options(-Wall -Wextra-semi -Wnull-dereference -Wno-unknown-pragmas - -Wno-overlength-strings) + -Wno-overlength-strings + -Wno-switch) # Set the position independent code property on all targets set(CMAKE_POSITION_INDEPENDENT_CODE ON) @@ -217,9 +218,6 @@ find_package(MPFR) set(JSONCPP_INCLUDE_DIR ${CONAN_INCLUDE_DIRS_JSONCPP}) include(ProjectJSONRPCCPP) -# rocksdb build -include(${PROJECT_SOURCE_DIR}/CMakeModules/rocksdb.cmake) - # Add sub-directories cmakes add_subdirectory(submodules) add_subdirectory(libraries) diff --git a/CMakeModules/cpp_graphql_gen.cmake b/CMakeModules/cpp_graphql_gen.cmake index 3816ec6256..8fff070ad2 100644 --- a/CMakeModules/cpp_graphql_gen.cmake +++ b/CMakeModules/cpp_graphql_gen.cmake @@ -6,7 +6,7 @@ set(Boost_NO_WARN_NEW_VERSIONS 1) FetchContent_Declare( cppgraphqlgen GIT_REPOSITORY https://github.com/microsoft/cppgraphqlgen.git - GIT_TAG v4.5.6 + GIT_TAG v4.5.8 GIT_SHALLOW TRUE ) set(GRAPHQL_BUILD_TESTS OFF) diff --git a/CMakeModules/cppcheck.cmake b/CMakeModules/cppcheck.cmake index 1472b400d8..33bb8c8186 100644 --- a/CMakeModules/cppcheck.cmake +++ b/CMakeModules/cppcheck.cmake @@ -10,6 +10,7 @@ else () COMMAND ${CPP_CHECK_EXE} --error-exitcode=1 --enable=all 
+ --check-level=exhaustive --suppress=missingInclude --suppress=missingIncludeSystem # find_if - useless here @@ -19,7 +20,6 @@ else () # false positive --suppress=uninitMemberVar:${PROJECT_SOURCE_DIR}/*/UPnP.cpp # This is only enabled because of test functions and false positives - --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/Eth.cpp --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/Common.cpp --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/Base64.cpp --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/util.cpp @@ -27,6 +27,13 @@ else () --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/UPnP.cpp --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/logger.cpp --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/state_api.cpp + --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/watches.hpp + --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/vector_ref.h + --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/RLP.h + --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/FixedHash.h + --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/CommonData.h + --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/Common.h + --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/Log.h # TODO remove this when we solve correct exit of programs --suppress=localMutex:${PROJECT_SOURCE_DIR}/*/main.cpp # Just style warning @@ -38,6 +45,7 @@ else () --suppress=unmatchedSuppression:${PROJECT_SOURCE_DIR}/*/Common.h --suppress=cstyleCast:${PROJECT_SOURCE_DIR}/*/vector_ref.h --suppress=cstyleCast:${PROJECT_SOURCE_DIR}/*/Common.h + --suppress=cstyleCast:${PROJECT_SOURCE_DIR}/*/transaction.cpp # not an issue here --suppress=virtualCallInConstructor:${PROJECT_SOURCE_DIR}/*/final_chain.cpp # just a warning diff --git a/CMakeModules/rocksdb.cmake b/CMakeModules/rocksdb.cmake deleted file mode 100644 index 86a9c12b9a..0000000000 --- a/CMakeModules/rocksdb.cmake +++ /dev/null @@ -1,32 +0,0 @@ -# ========================================================================== # -# RocksDB key-value store # -# 
========================================================================== # -include(FetchContent) - -set(CMAKE_POLICY_DEFAULT_CMP0077 NEW) - -FetchContent_Declare( - rocksdb - GIT_REPOSITORY https://github.com/facebook/rocksdb - GIT_TAG v8.5.3 - GIT_SHALLOW TRUE -) - -FetchContent_GetProperties(rocksdb) - -message(STATUS "Populating rocksdb") -set(USE_RTTI 1) -set(WITH_LZ4 ON) -set(WITH_GFLAGS OFF) -set(FAIL_ON_WARNINGS OFF) -set(PORTABLE 1 CACHE STRING "Override: Minimum CPU arch to support") # Disable -march=native -set(WITH_TESTS OFF CACHE INTERNAL "") -set(WITH_JNI OFF CACHE INTERNAL "") -set(WITH_TOOLS OFF CACHE INTERNAL "") -set(WITH_BENCHMARK_TOOLS OFF CACHE INTERNAL "") -set(WITH_CORE_TOOLS OFF CACHE INTERNAL "") -set(WITH_TRACE_TOOLS OFF CACHE INTERNAL "") -set(ROCKSDB_BUILD_SHARED ${BUILD_SHARED_LIBS} CACHE INTERNAL "") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -Wno-error=unused-const-variable -Wno-error=pedantic -Wno-error=format-pedantic -Wno-error=gnu-zero-variadic-macro-arguments -Wno-error=extra-semi -Wc++98-compat-extra-semi -Wno-error=deprecated-copy -Wno-deprecated-copy -Wno-error=pessimizing-move -Wno-pessimizing-move -Wno-pessimizing-move") - -FetchContent_MakeAvailable(rocksdb) diff --git a/conanfile.py b/conanfile.py index ef649a6957..9d1f5beccf 100644 --- a/conanfile.py +++ b/conanfile.py @@ -13,13 +13,14 @@ class TaraxaConan(ConanFile): generators = "cmake" def requirements(self): - self.requires("boost/1.85.0") - self.requires("cppcheck/2.12") #TODO(2.14.1) - self.requires("openssl/3.2.1") + self.requires("boost/1.86.0") + self.requires("cppcheck/2.15.0") + self.requires("openssl/3.3.2") self.requires("cryptopp/8.9.0") - self.requires("gtest/1.14.0") - self.requires("lz4/1.9.4") - self.requires("prometheus-cpp/1.1.0") + self.requires("gtest/1.15.0") + self.requires("lz4/1.10.0") + self.requires("rocksdb/9.2.1") + self.requires("prometheus-cpp/1.2.4") self.requires("jsoncpp/1.9.5") def _configure_boost_libs(self): @@ -60,6 +61,8 @@ def 
configure(self): self.options["gtest"].build_gmock = False # this links cppcheck to prce library self.options["cppcheck"].have_rules = False + self.options["rocksdb"].use_rtti = True + self.options["rocksdb"].with_lz4 = True # mpir is required by cppcheck and it causing gmp confict self.options["mpir"].enable_gmpcompat = False diff --git a/doc/building.md b/doc/building.md index 7af1fedd94..ae79c11362 100644 --- a/doc/building.md +++ b/doc/building.md @@ -20,12 +20,12 @@ will build out of the box without further effort: autoconf \ ccache \ cmake \ + clang \ clang-format-17 \ clang-tidy-17 \ llvm-17 \ golang-go \ python3-full \ - # this libs are required for arm build by go part. you can skip it for amd64 build libzstd-dev \ libsnappy-dev \ rapidjson-dev \ @@ -47,7 +47,7 @@ will build out of the box without further effort: ### Clone the Repository - git clone https://github.com/Taraxa-project/taraxa-node.git --branch testnet + git clone https://github.com/Taraxa-project/taraxa-node.git cd taraxa-node git submodule update --init --recursive @@ -62,8 +62,7 @@ will build out of the box without further effort: && conan profile update settings.compiler.libcxx=libstdc++11 clang \ && conan profile update settings.build_type=RelWithDebInfo clang \ && conan profile update env.CC=clang-17 clang \ - && conan profile update env.CXX=clang++-17 clang \ - && conan install --build missing -pr=clang . 
+ && conan profile update env.CXX=clang++-17 clang # Compile project using cmake mkdir cmake-build @@ -71,171 +70,18 @@ will build out of the box without further effort: cmake -DCONAN_PROFILE=clang -DCMAKE_BUILD_TYPE=RelWithDebInfo -DTARAXA_ENABLE_LTO=OFF -DTARAXA_STATIC_BUILD=OFF ../ make -j$(nproc) -## Building on Ubuntu 22.04 -For Ubuntu 22.04 users, after installing the right packages with `apt` taraxa-node -will build out of the box without further effort: - -### Install taraxa-node dependencies: - - # Required packages - sudo apt-get install -y \ - libtool \ - autoconf \ - ccache \ - cmake \ - clang-format-14 \ - clang-tidy-14 \ - golang-go \ - python3-pip \ - # this libs are required for arm build by go part. you can skip it for amd64 build - libzstd-dev \ - libsnappy-dev \ - rapidjson-dev \ - libgmp-dev \ - libmpfr-dev \ - libmicrohttpd-dev - - # Optional. Needed to run py_test. This won't install on arm64 OS because package is missing in apt - sudo add-apt-repository ppa:ethereum/ethereum - sudo apt-get update - sudo apt install solc - - # Install conan package manager - sudo python3 -m pip install conan==1.64.1 - - # Setup clang as default compiler either in your IDE or by env. 
variables" - export CC="clang-14" - export CXX="clang++-14" - -### Clone the Repository - - git clone https://github.com/Taraxa-project/taraxa-node.git --branch testnet - cd taraxa-node - git submodule update --init --recursive - -### Compile - - # Optional - one time action - # Create clang profile - # It is recommended to use clang because on other compilers you could face some errors - conan profile new clang --detect && \ - conan profile update settings.compiler=clang clang && \ - conan profile update settings.compiler.version=14 clang && \ - conan profile update settings.compiler.libcxx=libstdc++11 clang && \ - conan profile update env.CC=clang-14 clang && \ - conan profile update env.CXX=clang++-14 clang - - # Export needed var for conan - export CONAN_REVISIONS_ENABLED=1 - - # Compile project using cmake - mkdir cmake-build - cd cmake-build - cmake -DCONAN_PROFILE=clang -DCMAKE_BUILD_TYPE=RelWithDebInfo -DTARAXA_ENABLE_LTO=OFF -DTARAXA_STATIC_BUILD=OFF ../ - make -j$(nproc) - -## Building on Ubuntu 20.04 -For Ubuntu 20.04 users, after installing the right packages with `apt` taraxa-node -will build out of the box without further effort: - -### Install taraxa-node dependencies: - - # Required packages - sudo apt-get install -y \ - libtool \ - autoconf \ - ccache cmake gcc g++ clang-format clang-tidy cppcheck \ - libgflags-dev\ - libjsoncpp-dev \ - libjsonrpccpp-dev \ - python3-pip \ - rapidjson-dev \ - libgmp-dev \ - libmpfr-dev \ - libmicrohttpd-dev - - - # Install conan package manager - # >= 1.36.0 version is required to work properly with clang-14 - sudo python3 -m pip install conan==1.60.0 - - # Install cmake - # >= 3.20 version is required for JSON subcommand - # Setup your IDE accordingly to use this version - sudo python3 -m pip install cmake - - # Go (required) - curl -LO https://go.dev/dl/go1.22.2.linux-amd64.tar.gz - sudo tar -C /usr/local -xzf go1.22.2.linux-amd64.tar.gz - rm -rf go1.22.2.linux-amd64.tar.gz - - # Add go to PATH - # Add these env. 
variables to the ~/.profile to persist go settings even after restart - export GOROOT=/usr/local/go - export GOPATH=$HOME/.go - export PATH=$GOPATH/bin:$GOROOT/bin:$PATH - - # Optional - # We are using clang from llvm toolchain as default compiler as well as clang-format and clang-tidy - # It is possible to build taraxa-node also with other C++ compilers but to contribute to the official repo, - # changes must pass clang-format/clang-tidy checks for which we internally use llvm version=13 - # To install llvm: - sudo su - - curl -SL -o llvm.sh https://apt.llvm.org/llvm.sh && \ - chmod +x llvm.sh && \ - ./llvm.sh 14 && \ - apt-get install -y clang-format-14 clang-tidy-14 && \ - rm -f llvm.sh - - # Setup clang as default compiler either in your IDE or by env. variables" - export CC="clang-14" - export CXX="clang++-14" - -### Clone the Repository - - git clone https://github.com/Taraxa-project/taraxa-node.git --branch testnet - cd taraxa-node - git submodule update --init --recursive - -### Compile - - # Optional - one time action - # Create clang profile - # It is recommended to use clang because on other compilers you could face some errors - conan profile new clang --detect && \ - conan profile update settings.compiler=clang clang && \ - conan profile update settings.compiler.version=14 clang && \ - conan profile update settings.compiler.libcxx=libstdc++11 clang && \ - conan profile update env.CC=clang-14 clang && \ - conan profile update env.CXX=clang++-14 clang - - # Export needed var for conan - export CONAN_REVISIONS_ENABLED=1 - - # Compile project using cmake - mkdir cmake-build - cd cmake-build - cmake -DCONAN_PROFILE=clang -DCMAKE_BUILD_TYPE=RelWithDebInfo -DTARAXA_ENABLE_LTO=OFF -DTARAXA_STATIC_BUILD=OFF ../ - make -j$(nproc) - -And optional: - - # optional - make install # defaults to /usr/local - ## Building on MacOS ### Install taraxa-node dependencies: -First you need to get (Brew)[https://brew.sh/] package manager. 
After that you need tot install dependencies with it. Clang-14 is used for compilation. +First you need to get (Brew)[https://brew.sh/] package manager. After that you need tot install dependencies with it. Clang-17 is used for compilation. brew update - brew install coreutils go autoconf automake gflags git libtool llvm@14 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr libmicrohttpd + brew install coreutils go autoconf automake gflags git libtool llvm@17 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr libmicrohttpd ### Clone the Repository - git clone https://github.com/Taraxa-project/taraxa-node.git --branch testnet + git clone https://github.com/Taraxa-project/taraxa-node.git cd taraxa-node git submodule update --init --recursive @@ -245,8 +91,8 @@ First you need to get (Brew)[https://brew.sh/] package manager. After that you n # It is recommended to use clang because on other compilers you could face some errors conan profile new clang --detect && \ conan profile update settings.compiler=clang clang && \ - conan profile update settings.compiler.version=14 clang && \ - conan profile update settings.compiler.compiler.cppstd=14 + conan profile update settings.compiler.version=17 clang && \ + conan profile update settings.compiler.compiler.cppstd=17 conan profile update settings.compiler.libcxx=libc++ clang && \ conan profile update env.CC=clang clang && \ conan profile update env.CXX=clang++ clang @@ -304,7 +150,7 @@ You should be able to build project following default MacOS building process. Bu ### Clone the Repository - git clone https://github.com/Taraxa-project/taraxa-node.git --branch testnet + git clone https://github.com/Taraxa-project/taraxa-node.git cd taraxa-node git submodule update --init --recursive @@ -316,7 +162,7 @@ You should be able to build project following default MacOS building process. 
Bu # It output should be equal to `i386` conan profile new clang --detect && \ conan profile update settings.compiler=clang clang && \ - conan profile update settings.compiler.version=14 clang && \ + conan profile update settings.compiler.version=17 clang && \ conan profile update settings.compiler.libcxx=libc++ clang && \ conan profile update env.CC=/usr/local/opt/llvm/bin/clang clang && \ conan profile update env.CXX=/usr/local/opt/llvm/bin/clang++ clang diff --git a/for_devs/local-net b/for_devs/local-net index 8add8bec84..4a9087bffb 100755 --- a/for_devs/local-net +++ b/for_devs/local-net @@ -1,5 +1,4 @@ #!/usr/bin/env python3 - import click import subprocess import threading @@ -313,7 +312,7 @@ def faucet_worker(tps): time.sleep(10) web3 = Web3(Web3.HTTPProvider('http://127.0.0.1:7017')) nonce = web3.eth.get_transaction_count( - Web3.to_checksum_address(faucet_public_address)) + Web3.toChecksumAddress(faucet_public_address)) consensus_nodes = list(consensus_nodes_public_addresses.keys()) @@ -323,10 +322,10 @@ def faucet_worker(tps): 0, len(consensus_nodes)-1)]] tx = { 'nonce': nonce, - 'to': Web3.to_checksum_address(to), - 'value': web3.to_wei(100000000, 'gwei'), + 'to': Web3.toChecksumAddress(to), + 'value': web3.toWei(100000000, 'gwei'), 'gas': 21000, - 'gasPrice': web3.to_wei(1, 'gwei'), + 'gasPrice': web3.toWei(1, 'gwei'), 'chainId': int(chain_id) } nonce = nonce + 1 @@ -338,7 +337,7 @@ def faucet_worker(tps): try: tx_hash = web3.eth.send_raw_transaction(signed_tx.rawTransaction) log_format( - 'faucet', f'{t} Dripped to {to}, tx_hash: {web3.to_hex(tx_hash)}') + 'faucet', f'{t} Dripped to {to}, tx_hash: {web3.toHex(tx_hash)}') except Exception as e: log_format('faucet', f'{t} Failed to drip to {to}. 
Error: {str(e)}') pass diff --git a/libraries/aleth/libdevcore/Base64.h b/libraries/aleth/libdevcore/Base64.h index 61e7010682..cb67f2d812 100644 --- a/libraries/aleth/libdevcore/Base64.h +++ b/libraries/aleth/libdevcore/Base64.h @@ -31,7 +31,7 @@ #include -#include "FixedHash.h" +#include "Common.h" namespace dev { std::string toBase64(bytesConstRef _in); diff --git a/libraries/aleth/libdevcore/Common.cpp b/libraries/aleth/libdevcore/Common.cpp index 4600bb2998..c7365a2c4e 100644 --- a/libraries/aleth/libdevcore/Common.cpp +++ b/libraries/aleth/libdevcore/Common.cpp @@ -4,7 +4,6 @@ #include "Common.h" -#include "Exceptions.h" #include "Log.h" #if defined(_WIN32) diff --git a/libraries/aleth/libdevcore/Common.h b/libraries/aleth/libdevcore/Common.h index e7dfe89035..6f89f48476 100644 --- a/libraries/aleth/libdevcore/Common.h +++ b/libraries/aleth/libdevcore/Common.h @@ -12,7 +12,6 @@ #include #include #include -#include #include #pragma warning(push) #pragma GCC diagnostic push @@ -128,33 +127,6 @@ using strings = std::vector; // Null/Invalid values for convenience. extern bytes const NullBytes; -/// Interprets @a _u as a two's complement signed number and returns the -/// resulting s256. -inline s256 u2s(u256 _u) { - static const bigint c_end = bigint(1) << 256; - if (boost::multiprecision::bit_test(_u, 255)) - return s256(-(c_end - _u)); - else - return s256(_u); -} - -/// @returns the two's complement signed representation of the signed number _u. -inline u256 s2u(s256 _u) { - static const bigint c_end = bigint(1) << 256; - if (_u >= 0) - return u256(_u); - else - return u256(c_end + _u); -} - -/// @returns the smallest n >= 0 such that (1 << n) >= _x -inline unsigned int toLog2(u256 _x) { - unsigned ret; - for (ret = 0; _x >>= 1; ++ret) { - } - return ret; -} - template inline u256 exp10() { return exp10() * u256(10); @@ -165,12 +137,6 @@ inline u256 exp10<0>() { return u256(1); } -/// @returns the absolute distance between _a and _b. 
-template -inline N diff(N const& _a, N const& _b) { - return std::max(_a, _b) - std::min(_a, _b); -} - /// RAII utility class whose destructor calls a given function. class ScopeGuard { public: diff --git a/libraries/aleth/libdevcore/CommonData.cpp b/libraries/aleth/libdevcore/CommonData.cpp index 1137c1f97d..cc2fa0f876 100644 --- a/libraries/aleth/libdevcore/CommonData.cpp +++ b/libraries/aleth/libdevcore/CommonData.cpp @@ -4,7 +4,9 @@ #include "CommonData.h" -#include +#include + +#include #include "Exceptions.h" diff --git a/libraries/aleth/libdevcore/CommonIO.h b/libraries/aleth/libdevcore/CommonIO.h index 989bfc7c58..3dda2965a3 100644 --- a/libraries/aleth/libdevcore/CommonIO.h +++ b/libraries/aleth/libdevcore/CommonIO.h @@ -10,8 +10,6 @@ #include #include #include -#include -#include #include #include #include diff --git a/libraries/aleth/libdevcore/CommonJS.h b/libraries/aleth/libdevcore/CommonJS.h index d0c17b8595..e7bb61b363 100644 --- a/libraries/aleth/libdevcore/CommonJS.h +++ b/libraries/aleth/libdevcore/CommonJS.h @@ -8,7 +8,6 @@ #include #include "CommonData.h" -#include "CommonIO.h" #include "FixedHash.h" namespace dev { diff --git a/libraries/aleth/libdevcore/Guards.cpp b/libraries/aleth/libdevcore/Guards.cpp deleted file mode 100644 index 3f852d4c0a..0000000000 --- a/libraries/aleth/libdevcore/Guards.cpp +++ /dev/null @@ -1,9 +0,0 @@ -// Aleth: Ethereum C++ client, tools and libraries. -// Copyright 2014-2019 Aleth Authors. -// Licensed under the GNU General Public License, Version 3. 
- -#include "Guards.h" -using namespace std; -using namespace dev; - -namespace dev {} diff --git a/libraries/aleth/libdevcore/Guards.h b/libraries/aleth/libdevcore/Guards.h index 57cb71f5f3..9dc94c4c77 100644 --- a/libraries/aleth/libdevcore/Guards.h +++ b/libraries/aleth/libdevcore/Guards.h @@ -4,7 +4,6 @@ #pragma once -#include #include #include #pragma warning(push) diff --git a/libraries/aleth/libdevcore/Log.h b/libraries/aleth/libdevcore/Log.h index f91a640627..518f430f7a 100644 --- a/libraries/aleth/libdevcore/Log.h +++ b/libraries/aleth/libdevcore/Log.h @@ -11,7 +11,6 @@ #include #include -#include "CommonIO.h" #include "FixedHash.h" #include "Terminal.h" diff --git a/libraries/aleth/libdevcore/RLP.cpp b/libraries/aleth/libdevcore/RLP.cpp index a8a34966cb..23118b0dd8 100644 --- a/libraries/aleth/libdevcore/RLP.cpp +++ b/libraries/aleth/libdevcore/RLP.cpp @@ -2,6 +2,14 @@ // Copyright 2013-2019 Aleth Authors. // Licensed under the GNU General Public License, Version 3. #include "RLP.h" + +#include +#include + +#include +#include +#include + using namespace std; using namespace dev; diff --git a/libraries/aleth/libdevcore/RLP.h b/libraries/aleth/libdevcore/RLP.h index 4a7ea2bce1..45301cf5b9 100644 --- a/libraries/aleth/libdevcore/RLP.h +++ b/libraries/aleth/libdevcore/RLP.h @@ -7,8 +7,6 @@ #pragma once #include -#include -#include #include #include #include diff --git a/libraries/aleth/libdevcore/SHA3.cpp b/libraries/aleth/libdevcore/SHA3.cpp index 1ffd34890f..77da7e8016 100644 --- a/libraries/aleth/libdevcore/SHA3.cpp +++ b/libraries/aleth/libdevcore/SHA3.cpp @@ -6,6 +6,8 @@ #include +#include "ethash/hash_types.hpp" + namespace dev { bool sha3(bytesConstRef _input, bytesRef o_output) noexcept { diff --git a/libraries/aleth/libdevcrypto/AES.cpp b/libraries/aleth/libdevcrypto/AES.cpp index a7da21c779..473694acd9 100644 --- a/libraries/aleth/libdevcrypto/AES.cpp +++ b/libraries/aleth/libdevcrypto/AES.cpp @@ -9,9 +9,9 @@ #include #include #include +#include 
using namespace dev; -using namespace dev::crypto; bytes dev::aesDecrypt(bytesConstRef _ivCipher, std::string const& _password, unsigned _rounds, bytesConstRef _salt) { bytes pw = asBytes(_password); diff --git a/libraries/aleth/libdevcrypto/AES.h b/libraries/aleth/libdevcrypto/AES.h index c2ec6fcfd5..fb5589bf2c 100644 --- a/libraries/aleth/libdevcrypto/AES.h +++ b/libraries/aleth/libdevcrypto/AES.h @@ -8,7 +8,7 @@ #pragma once -#include "Common.h" +#include namespace dev { diff --git a/libraries/aleth/libdevcrypto/Common.cpp b/libraries/aleth/libdevcrypto/Common.cpp index c6ca11776d..f3908b6f55 100644 --- a/libraries/aleth/libdevcrypto/Common.cpp +++ b/libraries/aleth/libdevcrypto/Common.cpp @@ -6,8 +6,6 @@ #include #include -#include -#include #include // conflicts with #include #include @@ -17,7 +15,6 @@ #include "AES.h" #include "CryptoPP.h" -#include "Exceptions.h" using namespace std; using namespace dev; using namespace dev::crypto; diff --git a/libraries/aleth/libdevcrypto/CryptoPP.cpp b/libraries/aleth/libdevcrypto/CryptoPP.cpp index 42ebf71d89..5a432d9514 100644 --- a/libraries/aleth/libdevcrypto/CryptoPP.cpp +++ b/libraries/aleth/libdevcrypto/CryptoPP.cpp @@ -7,9 +7,7 @@ #include #include #include -#include #include // conflicts with -#include static_assert(CRYPTOPP_VERSION >= 565, "Wrong Crypto++ version"); diff --git a/libraries/aleth/libp2p/All.h b/libraries/aleth/libp2p/All.h deleted file mode 100644 index 626ff2582f..0000000000 --- a/libraries/aleth/libp2p/All.h +++ /dev/null @@ -1,6 +0,0 @@ -#pragma once - -#include "Capability.h" -#include "Common.h" -#include "Host.h" -#include "Session.h" diff --git a/libraries/aleth/libp2p/Common.cpp b/libraries/aleth/libp2p/Common.cpp index 011c443ae0..5959fc2d71 100644 --- a/libraries/aleth/libp2p/Common.cpp +++ b/libraries/aleth/libp2p/Common.cpp @@ -7,6 +7,7 @@ #include #include "Network.h" +#include "libdevcore/CommonIO.h" namespace dev { namespace p2p { diff --git a/libraries/aleth/libp2p/Common.h 
b/libraries/aleth/libp2p/Common.h index 43548c338a..591639f004 100644 --- a/libraries/aleth/libp2p/Common.h +++ b/libraries/aleth/libp2p/Common.h @@ -25,8 +25,9 @@ #include #include #include + namespace ba = boost::asio; -namespace bi = boost::asio::ip; +namespace bi = ba::ip; namespace dev { diff --git a/libraries/aleth/libp2p/Host.cpp b/libraries/aleth/libp2p/Host.cpp index 1e773acf73..e7f3a1a8b0 100644 --- a/libraries/aleth/libp2p/Host.cpp +++ b/libraries/aleth/libp2p/Host.cpp @@ -4,24 +4,15 @@ #include "Host.h" -#include -#include -#include -#include - -#include #include #include #include -#include #include -#include #include "Capability.h" #include "Common.h" #include "RLPxHandshake.h" #include "Session.h" -#include "UPnP.h" using namespace std; using namespace dev; @@ -296,7 +287,8 @@ void Host::startPeerSession(Public const& _id, RLP const& _hello, unique_ptraddress()); + if (!disconnect_reason && (!peerSlotsAvailable() && !is_trusted_node)) { cnetdetails << "Too many peers, can't connect. 
peer count: " << peer_count_() << " pending peers: " << m_pendingPeerConns.size(); disconnect_reason = TooManyPeers; @@ -420,7 +412,9 @@ void Host::runAcceptor() { return; } auto socket = make_shared(std::move(_socket)); - if (peer_count_() > peerSlots(Ingress)) { + // Since a connecting peer might be a trusted node which should always connect allow up to max number of trusted + // nodes above the limit + if (peer_count_() > (peerSlots(Ingress) + m_netConfig.trustedNodes.size())) { cnetdetails << "Dropping incoming connect due to maximum peer count (" << Ingress << " * ideal peer count): " << socket->remoteEndpoint(); socket->close(); diff --git a/libraries/aleth/libp2p/Host.h b/libraries/aleth/libp2p/Host.h index c7299252a7..628e12d53b 100644 --- a/libraries/aleth/libp2p/Host.h +++ b/libraries/aleth/libp2p/Host.h @@ -4,16 +4,14 @@ #pragma once +#include #include #include #include #include -#include #include -#include #include -#include #include #include @@ -26,9 +24,6 @@ #include "Session.h" #include "taraxa.hpp" -namespace io = boost::asio; -namespace bi = io::ip; - namespace std { template <> struct hash> { diff --git a/libraries/aleth/libp2p/Network.cpp b/libraries/aleth/libp2p/Network.cpp index 41b98c5c3d..7561aec69a 100644 --- a/libraries/aleth/libp2p/Network.cpp +++ b/libraries/aleth/libp2p/Network.cpp @@ -2,17 +2,15 @@ // Copyright 2014-2019 Aleth Authors. // Licensed under the GNU General Public License, Version 3. 
-#include #ifndef _WIN32 #include #endif #include -#include #include #include -#include +#include #include #include "Common.h" diff --git a/libraries/aleth/libp2p/Network.h b/libraries/aleth/libp2p/Network.h index aee0f7ad4a..082a2b3ba1 100644 --- a/libraries/aleth/libp2p/Network.h +++ b/libraries/aleth/libp2p/Network.h @@ -7,14 +7,7 @@ #include #include -#include -#include -#include -#include - #include "Common.h" -namespace ba = boost::asio; -namespace bi = ba::ip; namespace dev { namespace p2p { @@ -52,6 +45,9 @@ struct NetworkConfig { std::string listenIPAddress; uint16_t listenPort = c_defaultListenPort; + /// Trusted Nodes + std::unordered_set trustedNodes; + /// Preferences bool traverseNAT = true; diff --git a/libraries/aleth/libp2p/NodeTable.cpp b/libraries/aleth/libp2p/NodeTable.cpp index 49575dff69..82fba3a91b 100644 --- a/libraries/aleth/libp2p/NodeTable.cpp +++ b/libraries/aleth/libp2p/NodeTable.cpp @@ -5,7 +5,6 @@ #include "NodeTable.h" #include -using namespace std; namespace dev { namespace p2p { @@ -15,9 +14,9 @@ BOOST_LOG_INLINE_GLOBAL_LOGGER_CTOR_ARGS(g_discoveryWarnLogger, boost::log::sour (boost::log::keywords::severity = 0)(boost::log::keywords::channel = "discov")) // Cadence at which we timeout sent pings and evict unresponsive nodes -constexpr chrono::milliseconds c_handleTimeoutsIntervalMs{5000}; +constexpr std::chrono::milliseconds c_handleTimeoutsIntervalMs{5000}; // Cadence at which we remove old records from EndpointTracker -constexpr chrono::milliseconds c_removeOldEndpointStatementsIntervalMs{5000}; +constexpr std::chrono::milliseconds c_removeOldEndpointStatementsIntervalMs{5000}; // Change external endpoint after this number of peers report new one constexpr size_t c_minEndpointTrackStatements{10}; // Interval during which each endpoint statement is kept @@ -25,12 +24,12 @@ constexpr std::chrono::minutes c_endpointStatementTimeToLiveMin{5}; } // namespace -constexpr chrono::seconds DiscoveryDatagram::c_timeToLiveS; -constexpr 
chrono::milliseconds NodeTable::c_reqTimeoutMs; -constexpr chrono::milliseconds NodeTable::c_bucketRefreshMs; -constexpr chrono::milliseconds NodeTable::c_discoveryRoundIntervalMs; +constexpr std::chrono::seconds DiscoveryDatagram::c_timeToLiveS; +constexpr std::chrono::milliseconds NodeTable::c_reqTimeoutMs; +constexpr std::chrono::milliseconds NodeTable::c_bucketRefreshMs; +constexpr std::chrono::milliseconds NodeTable::c_discoveryRoundIntervalMs; -inline bool operator==(weak_ptr const& _weak, shared_ptr const& _shared) { +inline bool operator==(std::weak_ptr const& _weak, std::shared_ptr const& _shared) { return !_weak.owner_before(_shared) && !_shared.owner_before(_weak); } @@ -100,8 +99,8 @@ bool NodeTable::addKnownNode(Node const& _node, uint32_t _lastPongReceivedTime, return true; } - auto entry = make_shared(m_hostNodeIDHash, _node.id, _node.get_endpoint(), _lastPongReceivedTime, - _lastPongSentTime); + auto entry = std::make_shared(m_hostNodeIDHash, _node.id, _node.get_endpoint(), _lastPongReceivedTime, + _lastPongSentTime); if (entry->hasValidEndpointProof()) { LOG(m_logger) << "Known " << _node; @@ -137,16 +136,16 @@ bool NodeTable::isValidNode(Node const& _node) const { return true; } -list NodeTable::nodes() const { - list nodes; +std::list NodeTable::nodes() const { + std::list nodes; DEV_GUARDED(x_nodes) { for (auto& i : m_allNodes) nodes.push_back(i.second->id()); } return nodes; } -list NodeTable::snapshot() const { - list ret; +std::list NodeTable::snapshot() const { + std::list ret; DEV_GUARDED(x_state) { for (auto const& s : m_buckets) for (auto const& np : s.nodes) @@ -165,13 +164,14 @@ Node NodeTable::node(NodeID const& _id) { return UnspecifiedNode; } -shared_ptr NodeTable::nodeEntry(NodeID const& _id) { +std::shared_ptr NodeTable::nodeEntry(NodeID const& _id) { Guard l(x_nodes); auto const it = m_allNodes.find(_id); - return it != m_allNodes.end() ? it->second : shared_ptr(); + return it != m_allNodes.end() ? 
it->second : std::shared_ptr(); } -void NodeTable::doDiscoveryRound(NodeID _node, unsigned _round, shared_ptr>> _tried) { +void NodeTable::doDiscoveryRound(NodeID _node, unsigned _round, + std::shared_ptr>> _tried) { // NOTE: ONLY called by doDiscovery or "recursively" via lambda scheduled via // timer at the end of this function if (!m_socket->isOpen()) return; @@ -192,7 +192,7 @@ void NodeTable::doDiscoveryRound(NodeID _node, unsigned _round, shared_ptrendpoint(), _node); p.expiration = nextRequestExpirationTime(); p.sign(m_secret); - m_sentFindNodes.emplace_back(nodeEntry->id(), chrono::steady_clock::now()); + m_sentFindNodes.emplace_back(nodeEntry->id(), std::chrono::steady_clock::now()); LOG(m_logger) << p.typeName() << " to " << nodeEntry->node << " (target: " << _node << ")"; m_socket->send(p); @@ -225,15 +225,15 @@ void NodeTable::doDiscoveryRound(NodeID _node, unsigned _round, shared_ptr> NodeTable::nearestNodeEntries(NodeID const& _target) { - auto const distanceToTargetLess = [](pair> const& _node1, - pair> const& _node2) { +std::vector> NodeTable::nearestNodeEntries(NodeID const& _target) { + auto const distanceToTargetLess = [](std::pair> const& _node1, + std::pair> const& _node2) { return _node1.first < _node2.first; }; h256 const targetHash = sha3(_target); - std::multiset>, decltype(distanceToTargetLess)> nodesByDistanceToTarget( + std::multiset>, decltype(distanceToTargetLess)> nodesByDistanceToTarget( distanceToTargetLess); for (auto const& bucket : m_buckets) for (auto const& nodeWeakPtr : bucket.nodes) @@ -244,13 +244,13 @@ vector> NodeTable::nearestNodeEntries(NodeID const& _targe nodesByDistanceToTarget.erase(--nodesByDistanceToTarget.end()); } - vector> ret; + std::vector> ret; for (auto& distanceAndNode : nodesByDistanceToTarget) ret.emplace_back(std::move(distanceAndNode.second)); return ret; } -void NodeTable::ping(Node const& _node, shared_ptr _replacementNodeEntry) { +void NodeTable::ping(Node const& _node, std::shared_ptr 
_replacementNodeEntry) { if (!m_socket->isOpen()) return; // Don't send Ping if one is already sent @@ -267,7 +267,7 @@ void NodeTable::ping(Node const& _node, shared_ptr _replacementNodeEn m_socket->send(p); NodeValidation const validation{ - _node.id, _node.get_endpoint().tcpPort(), _node.get_endpoint().udpPort(), chrono::steady_clock::now(), + _node.id, _node.get_endpoint().tcpPort(), _node.get_endpoint().udpPort(), std::chrono::steady_clock::now(), pingHash, _replacementNodeEntry}; m_sentPings.insert({_node.get_endpoint(), validation}); } @@ -276,7 +276,7 @@ void NodeTable::schedulePing(Node const& _node) { post(strand_, [this, _node] { ping(_node, {}); }); } -void NodeTable::evict(NodeEntry const& _leastSeen, shared_ptr _replacement) { +void NodeTable::evict(NodeEntry const& _leastSeen, std::shared_ptr _replacement) { if (!m_socket->isOpen()) return; LOG(m_logger) << "Evicting node " << _leastSeen.node; ping(_leastSeen.node, std::move(_replacement)); @@ -284,7 +284,7 @@ void NodeTable::evict(NodeEntry const& _leastSeen, shared_ptr _replac if (m_nodeEventHandler) m_nodeEventHandler->appendEvent(_leastSeen.id(), NodeEntryScheduledForEviction); } -void NodeTable::noteActiveNode(shared_ptr _nodeEntry) { +void NodeTable::noteActiveNode(std::shared_ptr _nodeEntry) { assert(_nodeEntry); if (_nodeEntry->id() == m_hostNodeID) { @@ -300,7 +300,7 @@ void NodeTable::noteActiveNode(shared_ptr _nodeEntry) { LOG(m_logger) << "Active node " << _nodeEntry->node; - shared_ptr nodeToEvict; + std::shared_ptr nodeToEvict; { Guard l(x_state); // Find a bucket to put a node to @@ -349,12 +349,12 @@ void NodeTable::invalidateNode(NodeID const& _id) { sourceNodeEntry->lastPongReceivedTime = RLPXDatagramFace::secondsSinceEpoch() - NodeTable::c_bondingTimeSeconds; } -void NodeTable::dropNode(shared_ptr _n) { +void NodeTable::dropNode(std::shared_ptr _n) { // remove from nodetable { Guard l(x_state); NodeBucket& s = bucket_UNSAFE(_n.get()); - s.nodes.remove_if([_n](weak_ptr const& 
_bucketEntry) { return _bucketEntry == _n; }); + s.nodes.remove_if([_n](std::weak_ptr const& _bucketEntry) { return _bucketEntry == _n; }); } DEV_GUARDED(x_nodes) { m_allNodes.erase(_n->id()); } @@ -379,7 +379,7 @@ void NodeTable::onPacketReceived(UDPSocketFace*, bi::udp::endpoint const& _from, } } try { - unique_ptr packet = DiscoveryDatagram::interpretUDP(node_ip, _packet); + std::unique_ptr packet = DiscoveryDatagram::interpretUDP(node_ip, _packet); if (!packet) return; if (packet->isExpired()) { LOG(m_logger) << "Expired " << packet->typeName() << " from " << packet->sourceid << "@" << node_ip; @@ -387,7 +387,7 @@ void NodeTable::onPacketReceived(UDPSocketFace*, bi::udp::endpoint const& _from, } LOG(m_logger) << packet->typeName() << " from " << packet->sourceid << "@" << node_ip; - shared_ptr sourceNodeEntry; + std::shared_ptr sourceNodeEntry; switch (packet->packetType()) { case Pong::type: sourceNodeEntry = handlePong(node_ip, *packet); @@ -415,7 +415,7 @@ void NodeTable::onPacketReceived(UDPSocketFace*, bi::udp::endpoint const& _from, } if (sourceNodeEntry) noteActiveNode(std::move(sourceNodeEntry)); - } catch (exception const& _e) { + } catch (std::exception const& _e) { LOG(m_logger) << "Exception processing message from " << node_ip.address().to_string() << ":" << node_ip.port() << ": " << _e.what(); } catch (...) 
{ @@ -423,7 +423,7 @@ void NodeTable::onPacketReceived(UDPSocketFace*, bi::udp::endpoint const& _from, } } -shared_ptr NodeTable::handlePong(bi::udp::endpoint const& _from, DiscoveryDatagram const& _packet) { +std::shared_ptr NodeTable::handlePong(bi::udp::endpoint const& _from, DiscoveryDatagram const& _packet) { // validate pong auto const sentPing = m_sentPings.find(_from); if (sentPing == m_sentPings.end()) { @@ -447,13 +447,13 @@ shared_ptr NodeTable::handlePong(bi::udp::endpoint const& _from, Disc } // create or update nodeEntry with new Pong received time - shared_ptr sourceNodeEntry; + std::shared_ptr sourceNodeEntry; DEV_GUARDED(x_nodes) { auto it = m_allNodes.find(sourceId); if (it == m_allNodes.end()) { - sourceNodeEntry = make_shared(m_hostNodeIDHash, sourceId, - NodeIPEndpoint{_from.address(), _from.port(), nodeValidation.tcpPort}, - RLPXDatagramFace::secondsSinceEpoch(), 0 /* lastPongSentTime */); + sourceNodeEntry = std::make_shared( + m_hostNodeIDHash, sourceId, NodeIPEndpoint{_from.address(), _from.port(), nodeValidation.tcpPort}, + RLPXDatagramFace::secondsSinceEpoch(), 0 /* lastPongSentTime */); // We need to setup external port, as we where able to do ping-pong exchange and node is active sourceNodeEntry->node.external_udp_port = nodeValidation.udpPort; @@ -488,8 +488,9 @@ shared_ptr NodeTable::handlePong(bi::udp::endpoint const& _from, Disc return sourceNodeEntry; } -shared_ptr NodeTable::handleNeighbours(bi::udp::endpoint const& _from, DiscoveryDatagram const& _packet) { - shared_ptr sourceNodeEntry = nodeEntry(_packet.sourceid); +std::shared_ptr NodeTable::handleNeighbours(bi::udp::endpoint const& _from, + DiscoveryDatagram const& _packet) { + std::shared_ptr sourceNodeEntry = nodeEntry(_packet.sourceid); if (!sourceNodeEntry) { LOG(m_logger) << "Source node (" << _packet.sourceid << "@" << _from << ") not found in node table. 
Ignoring Neighbours packet."; @@ -504,7 +505,7 @@ shared_ptr NodeTable::handleNeighbours(bi::udp::endpoint const& _from auto const& in = dynamic_cast(_packet); bool expected = false; - auto now = chrono::steady_clock::now(); + auto now = std::chrono::steady_clock::now(); m_sentFindNodes.remove_if([&](NodeIdTimePoint const& _t) noexcept { if (_t.first != in.sourceid) return false; if (now - _t.second < c_reqTimeoutMs) expected = true; @@ -544,7 +545,7 @@ std::shared_ptr NodeTable::handleFindNode(bi::udp::endpoint const& _f } auto const& in = dynamic_cast(_packet); - vector> nearest = nearestNodeEntries(in.target); + std::vector> nearest = nearestNodeEntries(in.target); static unsigned constexpr nlimit = (NodeSocket::maxDatagramSize - 109) / 90; for (unsigned offset = 0; offset < nearest.size(); offset += nlimit) { Neighbours out(_from, nearest, offset, nlimit); @@ -696,15 +697,15 @@ void NodeTable::doDiscovery() { crypto::Nonce::get().ref().copyTo( randNodeId.ref().cropped(static_cast(h256::size), static_cast(h256::size))); LOG(m_logger) << "Starting discovery algorithm run for random node id: " << randNodeId; - doDiscoveryRound(randNodeId, 0 /* round */, make_shared>>()); + doDiscoveryRound(randNodeId, 0 /* round */, std::make_shared>>()); })); } void NodeTable::doHandleTimeouts() { runBackgroundTask(c_handleTimeoutsIntervalMs, m_timeoutsTimer, [this]() { - vector> nodesToActivate; + std::vector> nodesToActivate; for (auto it = m_sentPings.begin(); it != m_sentPings.end();) { - if (chrono::steady_clock::now() > it->second.pingSentTime + m_requestTimeToLive) { + if (std::chrono::steady_clock::now() > it->second.pingSentTime + m_requestTimeToLive) { if (auto node = nodeEntry(it->second.nodeID)) { if (node->lastPongReceivedTime < RLPXDatagramFace::secondsSinceEpoch() - m_requestTimeToLive.count()) { if (it->first == node->endpoint()) { @@ -765,8 +766,9 @@ void NodeTable::cancelTimer(std::shared_ptr _timer) { post(strand_, [_timer] { 
_timer->expires_at(c_steadyClockMin); }); } -unique_ptr DiscoveryDatagram::interpretUDP(bi::udp::endpoint const& _from, bytesConstRef _packet) { - unique_ptr decoded; +std::unique_ptr DiscoveryDatagram::interpretUDP(bi::udp::endpoint const& _from, + bytesConstRef _packet) { + std::unique_ptr decoded; // h256 + Signature + type + RLP (smallest possible packet is empty neighbours // packet which is 3 bytes) if (_packet.size() < static_cast(h256::size) + static_cast(Signature::size) + 1 + 3) { diff --git a/libraries/aleth/libp2p/NodeTable.h b/libraries/aleth/libp2p/NodeTable.h index fb61806d7d..a64cf071b4 100644 --- a/libraries/aleth/libp2p/NodeTable.h +++ b/libraries/aleth/libp2p/NodeTable.h @@ -10,7 +10,6 @@ #include #include -#include "Common.h" #include "ENR.h" #include "EndpointTracker.h" diff --git a/libraries/aleth/libp2p/Peer.cpp b/libraries/aleth/libp2p/Peer.cpp index 58d00c6548..6837e65e46 100644 --- a/libraries/aleth/libp2p/Peer.cpp +++ b/libraries/aleth/libp2p/Peer.cpp @@ -4,8 +4,6 @@ #include "Peer.h" using namespace std; -using namespace dev; -using namespace dev::p2p; namespace dev { diff --git a/libraries/aleth/libp2p/RLPXFrameCoder.cpp b/libraries/aleth/libp2p/RLPXFrameCoder.cpp index 4245a69211..6cc835b416 100644 --- a/libraries/aleth/libp2p/RLPXFrameCoder.cpp +++ b/libraries/aleth/libp2p/RLPXFrameCoder.cpp @@ -11,11 +11,8 @@ #include #include -#include "RLPXPacket.h" #include "RLPxHandshake.h" -using namespace std; -using namespace dev; using namespace dev::p2p; RLPXFrameInfo::RLPXFrameInfo(bytesConstRef _header) @@ -231,16 +228,16 @@ bool RLPXFrameCoder::authAndDecryptFrame(bytesRef io) { return true; } -h128 RLPXFrameCoder::egressDigest() { +dev::h128 RLPXFrameCoder::egressDigest() { CryptoPP::Keccak_256 h(m_impl->egressMac); - h128 digest; + dev::h128 digest; h.TruncatedFinal(digest.data(), h128::size); return digest; } -h128 RLPXFrameCoder::ingressDigest() { +dev::h128 RLPXFrameCoder::ingressDigest() { CryptoPP::Keccak_256 h(m_impl->ingressMac); 
- h128 digest; + dev::h128 digest; h.TruncatedFinal(digest.data(), h128::size); return digest; } diff --git a/libraries/aleth/libp2p/RLPxHandshake.cpp b/libraries/aleth/libp2p/RLPxHandshake.cpp index 4582a5ce5e..ea4792a10b 100644 --- a/libraries/aleth/libp2p/RLPxHandshake.cpp +++ b/libraries/aleth/libp2p/RLPxHandshake.cpp @@ -4,13 +4,9 @@ #include "RLPxHandshake.h" -#include "Host.h" -#include "Peer.h" -#include "Session.h" -using namespace std; -using namespace dev; +#include + using namespace dev::p2p; -using namespace dev::crypto; constexpr std::chrono::milliseconds RLPXHandshake::c_timeout; @@ -20,10 +16,10 @@ constexpr size_t c_ackCipherSizeBytes = 210; constexpr size_t c_authCipherSizeBytes = 307; } // namespace -RLPXHandshake::RLPXHandshake(shared_ptr ctx, std::shared_ptr const& _socket) +RLPXHandshake::RLPXHandshake(std::shared_ptr ctx, std::shared_ptr const& _socket) : RLPXHandshake(std::move(ctx), _socket, {}) {} -RLPXHandshake::RLPXHandshake(shared_ptr ctx, std::shared_ptr const& _socket, +RLPXHandshake::RLPXHandshake(std::shared_ptr ctx, std::shared_ptr const& _socket, NodeID _remote) : host_ctx_(std::move(ctx)), m_remote(_remote), @@ -35,7 +31,7 @@ RLPXHandshake::RLPXHandshake(shared_ptr ctx, std::shared_ptr< m_logger.add_attribute("Prefix", prefixAttr); m_errorLogger.add_attribute("Prefix", prefixAttr); - stringstream remoteInfoStream; + std::stringstream remoteInfoStream; remoteInfoStream << "(" << _remote; if (remoteSocketConnected()) remoteInfoStream << "@" << m_socket->remoteEndpoint(); remoteInfoStream << ")"; @@ -97,7 +93,7 @@ void RLPXHandshake::writeAckEIP8() { m_ack.resize(m_ack.size() + padAmount, 0); bytes prefix(2); - toBigEndian(m_ack.size() + c_eciesOverhead, prefix); + toBigEndian(m_ack.size() + crypto::c_eciesOverhead, prefix); encryptECIES(m_remote, bytesConstRef(&prefix), &m_ack, m_ackCipher); m_ackCipher.insert(m_ackCipher.begin(), prefix.begin(), prefix.end()); @@ -219,7 +215,7 @@ void RLPXHandshake::cancel() { void 
RLPXHandshake::error(boost::system::error_code _ech) { host_ctx_->on_failure(m_remote, m_failureReason); - stringstream errorStream; + std::stringstream errorStream; errorStream << "Handshake failed"; if (_ech) errorStream << " (I/O error: " << _ech.message() << ")"; if (remoteSocketConnected()) diff --git a/libraries/aleth/libp2p/RLPxHandshake.h b/libraries/aleth/libp2p/RLPxHandshake.h index ae16c796c2..668723cb8a 100644 --- a/libraries/aleth/libp2p/RLPxHandshake.h +++ b/libraries/aleth/libp2p/RLPxHandshake.h @@ -11,9 +11,6 @@ #include "RLPXFrameCoder.h" #include "RLPXSocket.h" -namespace ba = boost::asio; -namespace bi = boost::asio::ip; - namespace dev { namespace p2p { diff --git a/libraries/aleth/libp2p/Session.cpp b/libraries/aleth/libp2p/Session.cpp index 74228ed798..da2409fab8 100644 --- a/libraries/aleth/libp2p/Session.cpp +++ b/libraries/aleth/libp2p/Session.cpp @@ -5,21 +5,18 @@ #include "Session.h" #include -#include #include +#include #include -#include "Host.h" #include "RLPXFrameCoder.h" -using namespace std; -using namespace dev; using namespace dev::p2p; static constexpr uint32_t MIN_COMPRESSION_SIZE = 500; -Session::Session(SessionCapabilities caps, unique_ptr _io, std::shared_ptr _s, +Session::Session(SessionCapabilities caps, std::unique_ptr _io, std::shared_ptr _s, std::shared_ptr _n, PeerSessionInfo _info, std::optional immediate_disconnect_reason) : m_capabilities(std::move(caps)), @@ -27,9 +24,9 @@ Session::Session(SessionCapabilities caps, unique_ptr _io, std:: m_socket(std::move(_s)), m_peer(std::move(_n)), m_info(std::move(_info)), - m_ping(chrono::steady_clock::time_point::max()), + m_ping(std::chrono::steady_clock::time_point::max()), immediate_disconnect_reason_(immediate_disconnect_reason) { - stringstream remoteInfoStream; + std::stringstream remoteInfoStream; remoteInfoStream << "(" << m_info.id << "@" << m_socket->remoteEndpoint() << ")"; m_logSuffix = remoteInfoStream.str(); auto const attr = 
boost::log::attributes::constant{remoteInfoStream.str()}; @@ -45,8 +42,8 @@ Session::Session(SessionCapabilities caps, unique_ptr _io, std:: std::shared_ptr Session::make(SessionCapabilities caps, std::unique_ptr _io, std::shared_ptr _s, std::shared_ptr _n, PeerSessionInfo _info, std::optional immediate_disconnect_reason) { - shared_ptr ret(new Session(std::move(caps), std::move(_io), std::move(_s), std::move(_n), std::move(_info), - immediate_disconnect_reason)); + std::shared_ptr ret(new Session(std::move(caps), std::move(_io), std::move(_s), std::move(_n), + std::move(_info), immediate_disconnect_reason)); if (immediate_disconnect_reason) { ret->disconnect_(*immediate_disconnect_reason); return ret; @@ -64,7 +61,7 @@ std::shared_ptr Session::make(SessionCapabilities caps, std::unique_ptr Session::~Session() { cnetlog << "Closing peer session with " << m_logSuffix; - m_peer->m_lastConnected = m_peer->m_lastAttempted.load() - chrono::seconds(1); + m_peer->m_lastConnected = m_peer->m_lastAttempted.load() - std::chrono::seconds(1); drop(ClientQuit); } @@ -76,7 +73,7 @@ void Session::readPacket(unsigned _packetType, RLP const& _r) { auto packet_type_str = capabilityPacketTypeToString(cap, _packetType); LOG(m_netLoggerDetail) << "Received " << packet_type_str << " (" << _packetType << ") from"; if (_packetType < UserPacket) { - string err_msg; + std::string err_msg; try { interpretP2pPacket(static_cast(_packetType), _r); } catch (RLPException const& e) { @@ -103,7 +100,7 @@ void Session::interpretP2pPacket(P2pPacketType _t, RLP const& _r) { LOG(m_capLoggerDetail) << p2pPacketTypeToString(_t) << " from"; switch (_t) { case DisconnectPacket: { - string reason = "Unspecified"; + std::string reason = "Unspecified"; auto r = (DisconnectReason)_r[0].toInt(); if (!_r[0].isInt()) drop(BadProtocol); @@ -123,12 +120,12 @@ void Session::interpretP2pPacket(P2pPacketType _t, RLP const& _r) { case PongPacket: { std::unique_lock l(x_info); m_info.lastPing = 
std::chrono::steady_clock::now() - m_ping; - LOG(m_capLoggerDetail) << "Ping latency: " << chrono::duration_cast(m_info.lastPing).count() - << " ms"; + LOG(m_capLoggerDetail) << "Ping latency: " + << std::chrono::duration_cast(m_info.lastPing).count() << " ms"; break; } default: - stringstream ss; + std::stringstream ss; ss << "Unknown p2p packet type: " << _t; throw UnknownP2PPacketType(ss.str()); } @@ -141,7 +138,7 @@ void Session::ping_() { m_ping = std::chrono::steady_clock::now(); } -RLPStream& Session::prep(RLPStream& _s, P2pPacketType _id, unsigned _args) { +dev::RLPStream& Session::prep(RLPStream& _s, P2pPacketType _id, unsigned _args) { return _s.append((unsigned)_id).appendList(_args); } @@ -355,7 +352,7 @@ void Session::doRead() { << packetType << " (" << packet_type_str << "). Frame Size: " << frame.size() << ". Size encoded in RLP: " << RLP(frame.cropped(1), RLP::LaissezFaire).actualSize() - << ". Message: " << toHex(frame) << endl; + << ". Message: " << toHex(frame) << std::endl; disconnect_(BadProtocol); return; } @@ -372,7 +369,7 @@ void Session::doRead() { "corrupted): " << packetType << " (" << packet_type_str << "). Frame Size: " << frame.size() << ". Size encoded in RLP: " << RLP(frame.cropped(1), RLP::LaissezFaire).actualSize() - << ". Message: " << toHex(frame) << endl; + << ". 
Message: " << toHex(frame) << std::endl; disconnect_(BadProtocol); return; } diff --git a/libraries/aleth/libp2p/Session.h b/libraries/aleth/libp2p/Session.h index 2c2244e934..b84732c17e 100644 --- a/libraries/aleth/libp2p/Session.h +++ b/libraries/aleth/libp2p/Session.h @@ -8,15 +8,11 @@ #include #include -#include #include #include -#include -#include #include #include -#include "Capability.h" #include "Common.h" #include "Peer.h" #include "RLPXSocket.h" diff --git a/libraries/aleth/libp2p/UDP.cpp b/libraries/aleth/libp2p/UDP.cpp index a9a142c6b0..b199519096 100644 --- a/libraries/aleth/libp2p/UDP.cpp +++ b/libraries/aleth/libp2p/UDP.cpp @@ -3,11 +3,8 @@ // Licensed under the GNU General Public License, Version 3. #include "UDP.h" -using namespace std; -using namespace dev; -using namespace dev::p2p; -h256 RLPXDatagramFace::sign(Secret const& _k) { +dev::h256 dev::p2p::RLPXDatagramFace::sign(Secret const& _k) { assert(packetType()); RLPStream rlpxstream; @@ -35,7 +32,7 @@ h256 RLPXDatagramFace::sign(Secret const& _k) { return hash; } -Public RLPXDatagramFace::authenticate(bytesConstRef _sig, bytesConstRef _rlp) { +dev::Public dev::p2p::RLPXDatagramFace::authenticate(bytesConstRef _sig, bytesConstRef _rlp) { Signature const& sig = *(Signature const*)_sig.data(); return dev::recover(sig, sha3(_rlp)); } diff --git a/libraries/aleth/libp2p/UDP.h b/libraries/aleth/libp2p/UDP.h index d168cf4ad8..72310e5695 100644 --- a/libraries/aleth/libp2p/UDP.h +++ b/libraries/aleth/libp2p/UDP.h @@ -15,11 +15,8 @@ #include #include #include -#include #include "Common.h" -namespace ba = boost::asio; -namespace bi = ba::ip; namespace dev { namespace p2p { diff --git a/libraries/aleth/libp2p/UPnP.cpp b/libraries/aleth/libp2p/UPnP.cpp index bcda5f2c0d..733202b0c0 100644 --- a/libraries/aleth/libp2p/UPnP.cpp +++ b/libraries/aleth/libp2p/UPnP.cpp @@ -15,8 +15,7 @@ #include #include #include -using namespace std; -using namespace dev; + using namespace dev::p2p; UPnP::UPnP() @@ -82,7 +81,7 
@@ UPnP::~UPnP() { for (auto i : r) removeRedirect(i); } -string UPnP::externalIP() { +std::string UPnP::externalIP() { #if ETH_MINIUPNPC char addr[16]; if (!UPNP_GetExternalIPAddress(m_urls->controlURL, m_data->first.servicetype, addr)) return addr; diff --git a/libraries/aleth/libp2p/UPnP.h b/libraries/aleth/libp2p/UPnP.h index abd4243bb3..03fe6966ff 100644 --- a/libraries/aleth/libp2p/UPnP.h +++ b/libraries/aleth/libp2p/UPnP.h @@ -7,7 +7,6 @@ #include #include #include -#include struct UPNPUrls; struct IGDdatas; diff --git a/libraries/aleth/libp2p/taraxa.hpp b/libraries/aleth/libp2p/taraxa.hpp index 288a13b458..7250bb65e3 100644 --- a/libraries/aleth/libp2p/taraxa.hpp +++ b/libraries/aleth/libp2p/taraxa.hpp @@ -1,6 +1,6 @@ #pragma once -#include "libp2p/Common.h" +#include "Common.h" namespace dev::p2p { diff --git a/libraries/cli/include/cli/config.hpp b/libraries/cli/include/cli/config.hpp index 41aa55e7e4..c03478efb2 100644 --- a/libraries/cli/include/cli/config.hpp +++ b/libraries/cli/include/cli/config.hpp @@ -3,7 +3,6 @@ #include #include -#include "cli/configs.hpp" #include "config/config.hpp" namespace taraxa::cli { diff --git a/libraries/cli/include/cli/config_jsons/default/default_config.json b/libraries/cli/include/cli/config_jsons/default/default_config.json index 72e9409c3f..0f72c0bc9b 100644 --- a/libraries/cli/include/cli/config_jsons/default/default_config.json +++ b/libraries/cli/include/cli/config_jsons/default/default_config.json @@ -24,7 +24,7 @@ "packets_stats_time_period_ms": 60000, "peer_max_packets_processing_time_us": 10000000, "peer_max_packets_queue_size_limit": 100000, - "max_packets_queue_size": 200000 + "max_packets_queue_size": 100 }, "listen_ip": "0.0.0.0", "listen_port": 10002, diff --git a/libraries/cli/include/cli/config_jsons/default/default_genesis.json b/libraries/cli/include/cli/config_jsons/default/default_genesis.json index 322d177b10..90f23f34b2 100644 --- 
a/libraries/cli/include/cli/config_jsons/default/default_genesis.json +++ b/libraries/cli/include/cli/config_jsons/default/default_genesis.json @@ -119,6 +119,12 @@ "block_num": 20, "pillar_blocks_interval": 10, "bridge_contract_address": "0xcAF2b453FE8382a4B8110356DF0508f6d71F22BF" + }, + "cornus_hf": { + "block_num": 100, + "delegation_locking_period": 5, + "dag_gas_limit": "0x1908B100", + "pbft_gas_limit": "0x7d2b7500" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json index 47d0713725..2f812df6f4 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json @@ -24,7 +24,7 @@ "packets_stats_time_period_ms": 60000, "peer_max_packets_processing_time_us": 10000000, "peer_max_packets_queue_size_limit": 100000, - "max_packets_queue_size": 200000 + "max_packets_queue_size": 100 }, "listen_ip": "0.0.0.0", "listen_port": 10002, diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json index 51fb60004b..112ef5e0a7 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json @@ -284,6 +284,12 @@ "block_num": 100, "pillar_blocks_interval": 10, "bridge_contract_address": "0xcAF2b453FE8382a4B8110356DF0508f6d71F22BF" + }, + "cornus_hf": { + "block_num": 0, + "delegation_locking_period": 5, + "dag_gas_limit": "0x1908B100", + "pbft_gas_limit": "0x7d2b7500" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_config.json b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_config.json index 66cb1b4499..f82d06c4f2 100644 --- a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_config.json +++ 
b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_config.json @@ -24,7 +24,7 @@ "packets_stats_time_period_ms": 60000, "peer_max_packets_processing_time_us": 0, "peer_max_packets_queue_size_limit": 0, - "max_packets_queue_size": 200000 + "max_packets_queue_size": 100 }, "listen_ip": "0.0.0.0", "listen_port": 10002, diff --git a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json index 10a7c5e93a..b00f1234d6 100644 --- a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json @@ -1653,6 +1653,12 @@ "block_num": 11616000, "pillar_blocks_interval": 4000, "bridge_contract_address": "0xe126E0BaeAE904b8Cfd619Be1A8667A173b763a1" + }, + "cornus_hf": { + "block_num": 15276000, + "delegation_locking_period": 163459, + "dag_gas_limit": "0x1908B100", + "pbft_gas_limit": "0x7d2b7500" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json index d444117b3c..2667146a60 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json @@ -24,13 +24,13 @@ "packets_stats_time_period_ms": 60000, "peer_max_packets_processing_time_us": 0, "peer_max_packets_queue_size_limit": 0, - "max_packets_queue_size": 200000 + "max_packets_queue_size": 100 }, "listen_ip": "0.0.0.0", "listen_port": 10002, "transaction_interval_ms": 100, "ideal_peer_count": 10, - "max_peer_count": 50, + "max_peer_count": 20, "sync_level_size": 10, "packets_processing_threads": 14, "peer_blacklist_timeout": 600, diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index 9d34feb1c0..6062b19f03 100644 --- 
a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -4,7 +4,7 @@ "level": "0x0", "pivot": "0x0000000000000000000000000000000000000000000000000000000000000000", "sig": "0xb7e22d46c1ba94d5e8347b01d137b5c428fcbbeaf0a77fb024cbbf1517656ff00d04f7f25be608c321b0d7483c402c294ff46c49b265305d046a52236c0a363701", - "timestamp": "0x669f7f20", + "timestamp": "0x6750605A", "tips": [], "transactions": [] }, @@ -105,13 +105,13 @@ "dag_blocks_size": "0x32", "ghost_path_move_back": "0x0", "lambda_ms": "0x5DC", - "gas_limit": "0x7d2b7500" + "gas_limit": "0x12C684C0" }, "dag": { "block_proposer": { "shard": 1 }, - "gas_limit": "0x1908B100" + "gas_limit": "0x1E0A6E0" }, "sortition": { "changes_count_for_average": 10, @@ -153,6 +153,12 @@ "block_num": 1000, "pillar_blocks_interval": 1000, "bridge_contract_address": "0xcAF2b453FE8382a4B8110356DF0508f6d71F22BF" + }, + "cornus_hf": { + "block_num": 1000, + "delegation_locking_period": 5, + "dag_gas_limit": "0x1908B100", + "pbft_gas_limit": "0x7d2b7500" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_updater.hpp b/libraries/cli/include/cli/config_updater.hpp index a0b9d8577c..84f88edd5a 100644 --- a/libraries/cli/include/cli/config_updater.hpp +++ b/libraries/cli/include/cli/config_updater.hpp @@ -1,8 +1,9 @@ #pragma once -#include +#include -#include "config/config.hpp" +#include +#include namespace taraxa::cli { diff --git a/libraries/cli/src/config.cpp b/libraries/cli/src/config.cpp index 3023f3fbbe..35db2c4c13 100644 --- a/libraries/cli/src/config.cpp +++ b/libraries/cli/src/config.cpp @@ -117,8 +117,8 @@ Config::Config(int argc, const char* argv[]) { "Log channels to log in addition to log channels defined in config: [channel:level, ....]"); node_command_options.add_options()(LOG_CONFIGURATIONS, bpo::value>(&log_configurations)->multitoken(), - "Log confifugrations to use: [configuration_name, ....]"); - 
node_command_options.add_options()(NODE_SECRET, bpo::value(&node_secret), "Nose secret key to use"); + "Log configurations to use: [configuration_name, ....]"); + node_command_options.add_options()(NODE_SECRET, bpo::value(&node_secret), "Node secret key to use"); node_command_options.add_options()(VRF_SECRET, bpo::value(&vrf_secret), "Vrf secret key to use"); diff --git a/libraries/cli/src/config_updater.cpp b/libraries/cli/src/config_updater.cpp index 52fee01e70..bfd538f817 100644 --- a/libraries/cli/src/config_updater.cpp +++ b/libraries/cli/src/config_updater.cpp @@ -1,8 +1,6 @@ #include "cli/config_updater.hpp" #include "cli/tools.hpp" -#include "common/jsoncpp.hpp" -#include "config/version.hpp" namespace taraxa::cli { @@ -19,7 +17,7 @@ auto NetworkIPChange = [](Json::Value&, const Json::Value&) { ConfigUpdater::ConfigUpdater(int chain_id) { new_conf_ = tools::getConfig(static_cast(chain_id)); - // Regiser changes that should apply + // Register changes that should apply config_changes_.emplace_back(NetworkIPChange); } diff --git a/libraries/cli/src/tools.cpp b/libraries/cli/src/tools.cpp index 67a87e8263..26d03b0ad6 100644 --- a/libraries/cli/src/tools.cpp +++ b/libraries/cli/src/tools.cpp @@ -4,14 +4,13 @@ #include #include -#include #include "cli/config.hpp" +#include "cli/configs.hpp" #include "common/jsoncpp.hpp" using namespace std; using namespace dev; -namespace fs = std::filesystem; namespace taraxa::cli::tools { diff --git a/libraries/common/CMakeLists.txt b/libraries/common/CMakeLists.txt index d4aef9ef0b..fef03821c7 100644 --- a/libraries/common/CMakeLists.txt +++ b/libraries/common/CMakeLists.txt @@ -1,6 +1,6 @@ set(HEADERS include/common/constants.hpp - include/common/static_init.hpp + include/common/init.hpp include/common/types.hpp include/common/config_exception.hpp include/common/vrf_wrapper.hpp diff --git a/libraries/common/include/common/constants.hpp b/libraries/common/include/common/constants.hpp index 5d7e4540e1..aea008f1ac 100644 --- 
a/libraries/common/include/common/constants.hpp +++ b/libraries/common/include/common/constants.hpp @@ -8,7 +8,6 @@ namespace taraxa { GLOBAL_CONST(h256, ZeroHash); -GLOBAL_CONST(h256, EmptySHA3); GLOBAL_CONST(h256, EmptyRLPListSHA3); GLOBAL_CONST(h64, EmptyNonce); GLOBAL_CONST(u256, ZeroU256); @@ -29,8 +28,10 @@ const uint64_t kMinTxGas{21000}; constexpr uint32_t kMinTransactionPoolSize{30000}; constexpr uint32_t kDefaultTransactionPoolSize{200000}; +constexpr uint32_t kMaxNonFinalizedTransactions{1000000}; +constexpr uint32_t kMaxNonFinalizedDagBlocks{100}; -const size_t kV2NetworkVersion = 2; +const size_t kV3NetworkVersion = 3; const uint32_t kRecentlyFinalizedTransactionsFactor = 2; diff --git a/libraries/common/include/common/encoding_rlp.hpp b/libraries/common/include/common/encoding_rlp.hpp index 90cb6918ee..8cd6fd8594 100644 --- a/libraries/common/include/common/encoding_rlp.hpp +++ b/libraries/common/include/common/encoding_rlp.hpp @@ -3,10 +3,6 @@ #include #include -#include -#include - -#include "common/util.hpp" namespace taraxa::util { using dev::RLP; @@ -41,7 +37,7 @@ inline auto rlp(RLPEncoderRef encoding, T const& target) -> decltype(target.rlp( inline auto rlp(RLPEncoderRef encoding, std::string const& target) { encoding.append(target); } -inline auto rlp(RLPEncoderRef encoding, bytes const& target) { encoding.append(target); } +inline auto rlp(RLPEncoderRef encoding, dev::bytes const& target) { encoding.append(target); } template void rlp(RLPEncoderRef encoding, std::optional const& target) { @@ -106,7 +102,7 @@ void rlp(RLPDecoderRef encoding, dev::FixedHash& target) { inline auto rlp(RLPDecoderRef encoding, std::string& target) { target = encoding.value.toString(encoding.strictness); } -inline auto rlp(RLPDecoderRef encoding, bytes& target) { target = encoding.value.toBytes(encoding.strictness); } +inline auto rlp(RLPDecoderRef encoding, dev::bytes& target) { target = encoding.value.toBytes(encoding.strictness); } template void 
rlp(RLPDecoderRef encoding, std::optional& target) { @@ -194,14 +190,14 @@ T rlp_dec(RLPDecoderRef encoding) { } template -bytes const& rlp_enc(RLPEncoderRef encoder_to_reuse, T const& obj) { +dev::bytes const& rlp_enc(RLPEncoderRef encoder_to_reuse, T const& obj) { encoder_to_reuse.clear(); rlp(encoder_to_reuse, obj); return encoder_to_reuse.out(); } template -bytes rlp_enc(T const& obj) { +dev::bytes rlp_enc(T const& obj) { dev::RLPStream s; rlp(s, obj); return std::move(s.invalidate()); diff --git a/libraries/common/include/common/global_const.hpp b/libraries/common/include/common/global_const.hpp index aca855643b..4abffacf6e 100644 --- a/libraries/common/include/common/global_const.hpp +++ b/libraries/common/include/common/global_const.hpp @@ -1,7 +1,5 @@ #pragma once -#include - #define GLOBAL_CONST(_type_, _name_) _type_ const &_name_() #define GLOBAL_CONST_DEF(_name_, _init_) \ diff --git a/libraries/common/include/common/init.hpp b/libraries/common/include/common/init.hpp new file mode 100644 index 0000000000..59fbeb7809 --- /dev/null +++ b/libraries/common/include/common/init.hpp @@ -0,0 +1,36 @@ +#pragma once + +#include +#include + +#include "common/util.hpp" + +namespace taraxa { + +inline void static_init() { + if (sodium_init() == -1) { + throw std::runtime_error("libsodium init failure"); + } +} + +inline bool checkDiskSpace(const std::string& path, uint64_t required_space_MB) { + // Convert MB to bytes + const uint64_t required_space_bytes = required_space_MB * 1024 * 1024; + + struct statvfs stat; + + // Get file system statistics + if (statvfs(path.c_str(), &stat) != 0) { + // If statvfs fails, return true + std::cerr << "Error getting file system stats" << std::endl; + return true; + } + + // Calculate available space + const uint64_t available_space = stat.f_bsize * stat.f_bavail; + + // Check if available space is greater than or equal to the required space + return available_space >= required_space_bytes; +} + +} // namespace taraxa diff --git 
a/libraries/common/include/common/static_init.hpp b/libraries/common/include/common/static_init.hpp deleted file mode 100644 index f39cf34cce..0000000000 --- a/libraries/common/include/common/static_init.hpp +++ /dev/null @@ -1,15 +0,0 @@ -#pragma once - -#include - -#include "common/util.hpp" - -namespace taraxa { - -inline void static_init() { - if (sodium_init() == -1) { - throw std::runtime_error("libsodium init failure"); - } -} - -} // namespace taraxa diff --git a/libraries/common/include/common/thread_pool.hpp b/libraries/common/include/common/thread_pool.hpp index 7c7b40d44e..49ca42f286 100644 --- a/libraries/common/include/common/thread_pool.hpp +++ b/libraries/common/include/common/thread_pool.hpp @@ -1,7 +1,6 @@ #pragma once #include -#include #include "common/functional.hpp" diff --git a/libraries/common/include/common/types.hpp b/libraries/common/include/common/types.hpp index 661e82f3dd..b04a376a3a 100644 --- a/libraries/common/include/common/types.hpp +++ b/libraries/common/include/common/types.hpp @@ -6,15 +6,9 @@ #include #include -#include -#include -#include -#include namespace taraxa { -namespace fs = std::filesystem; - using dev::Address; using dev::AddressSet; using dev::bytes; diff --git a/libraries/common/include/common/util.hpp b/libraries/common/include/common/util.hpp index 737d84d317..82fc75bcf2 100644 --- a/libraries/common/include/common/util.hpp +++ b/libraries/common/include/common/util.hpp @@ -3,7 +3,6 @@ #include #include #include -#include #include #include @@ -11,18 +10,13 @@ #include #include #include -#include #include #include -#include -#include #include #include #include #include -#include "common/types.hpp" - namespace taraxa { /** diff --git a/libraries/common/include/common/vrf_wrapper.hpp b/libraries/common/include/common/vrf_wrapper.hpp index d233b6de70..f11a5af956 100644 --- a/libraries/common/include/common/vrf_wrapper.hpp +++ b/libraries/common/include/common/vrf_wrapper.hpp @@ -2,11 +2,8 @@ #include -#include - 
#include "common/types.hpp" -#include "common/util.hpp" -#include "sodium.h" +#include "sodium/crypto_vrf.h" namespace taraxa::vrf_wrapper { @@ -21,7 +18,7 @@ vrf_pk_t getVrfPublicKey(vrf_sk_t const &sk); bool isValidVrfPublicKey(vrf_pk_t const &pk); // get proof if public is valid std::optional getVrfProof(vrf_sk_t const &pk, bytes const &msg); -// get output if proff is valid +// get output if proof is valid std::optional getVrfOutput(vrf_pk_t const &pk, vrf_proof_t const &proof, bytes const &msg, bool strict = true); diff --git a/libraries/common/src/constants.cpp b/libraries/common/src/constants.cpp index cd40cbf508..b7b9e009f9 100644 --- a/libraries/common/src/constants.cpp +++ b/libraries/common/src/constants.cpp @@ -6,7 +6,6 @@ namespace taraxa { GLOBAL_CONST_DEF(ZeroHash, {}) -GLOBAL_CONST_DEF(EmptySHA3, dev::sha3(dev::bytesConstRef())) GLOBAL_CONST_DEF(EmptyRLPListSHA3, dev::sha3(dev::RLPStream(0).out())) GLOBAL_CONST_DEF(EmptyNonce, {}) GLOBAL_CONST_DEF(ZeroU256, {}) diff --git a/libraries/common/src/jsoncpp.cpp b/libraries/common/src/jsoncpp.cpp index ec4a9c55f8..8371e25575 100644 --- a/libraries/common/src/jsoncpp.cpp +++ b/libraries/common/src/jsoncpp.cpp @@ -1,6 +1,14 @@ #include "common/jsoncpp.hpp" -#include "common/util.hpp" +#include +#include + +#include +#include +#include +#include +#include +#include namespace taraxa::util { diff --git a/libraries/common/src/util.cpp b/libraries/common/src/util.cpp index a8c8190aa6..70845183f0 100644 --- a/libraries/common/src/util.cpp +++ b/libraries/common/src/util.cpp @@ -1,8 +1,5 @@ #include "common/util.hpp" -#include -#include -#include namespace taraxa { std::string jsonToUnstyledString(const Json::Value &value) { diff --git a/libraries/config/include/config/config.hpp b/libraries/config/include/config/config.hpp index 0ba2972caa..c5fa6e0b79 100644 --- a/libraries/config/include/config/config.hpp +++ b/libraries/config/include/config/config.hpp @@ -1,7 +1,5 @@ #pragma once -#include 
"common/config_exception.hpp" -#include "common/util.hpp" #include "common/vrf_wrapper.hpp" #include "config/genesis.hpp" #include "config/network.hpp" @@ -60,6 +58,8 @@ struct FullNodeConfig { bool enable_test_rpc = false; bool enable_debug = false; uint32_t final_chain_cache_in_blocks = 5; + uint64_t propose_dag_gas_limit = 0x1E0A6E0; + uint64_t propose_pbft_gas_limit = 0x12C684C0; // config values that limits transactions pool uint32_t transactions_pool_size = kDefaultTransactionPoolSize; @@ -70,7 +70,7 @@ struct FullNodeConfig { auto net_file_path() const { return data_path / "net"; } /** - * @brief Validates config values, throws configexception if validation failes + * @brief Validates config values, throws configexception if validation fails * @return */ void validate() const; diff --git a/libraries/config/include/config/config_utils.hpp b/libraries/config/include/config/config_utils.hpp index e5b168d85f..c82ee77987 100644 --- a/libraries/config/include/config/config_utils.hpp +++ b/libraries/config/include/config/config_utils.hpp @@ -1,10 +1,12 @@ #pragma once +#include #include +#include -#include "common/config_exception.hpp" -#include "common/types.hpp" -#include "common/util.hpp" +namespace Json { +class Value; +} // namespace Json namespace taraxa { std::string getConfigErr(const std::vector &path); @@ -12,13 +14,11 @@ std::string getConfigErr(const std::vector &path); Json::Value getConfigData(Json::Value root, const std::vector &path, bool optional = false); std::string getConfigDataAsString(const Json::Value &root, const std::vector &path, bool optional = false, - std::string value = {}); + const std::string &value = {}); -uint32_t getConfigDataAsUInt(const Json::Value &root, const std::vector &path, bool optional = false, +uint64_t getConfigDataAsUInt(const Json::Value &root, const std::vector &path, bool optional = false, uint32_t value = 0); -uint64_t getConfigDataAsUInt64(const Json::Value &root, const std::vector &path); - bool 
getConfigDataAsBoolean(const Json::Value &root, const std::vector &path, bool optional = false, bool value = false); diff --git a/libraries/config/include/config/genesis.hpp b/libraries/config/include/config/genesis.hpp index 4963214807..11ef9b42e4 100644 --- a/libraries/config/include/config/genesis.hpp +++ b/libraries/config/include/config/genesis.hpp @@ -2,19 +2,12 @@ #include -#include -#include - -#include "common/lazy.hpp" #include "config/dag_config.hpp" #include "config/pbft_config.hpp" #include "config/state_config.hpp" #include "dag/dag_block.hpp" namespace taraxa { -using std::string; -using std::unordered_map; -using ::taraxa::util::lazy::LazyVal; struct GasPriceConfig { uint64_t percentile = 60; @@ -41,6 +34,7 @@ struct GenesisConfig { bytes rlp() const; blk_hash_t genesisHash() const; void updateBlocksPerYear(); + std::pair getGasLimits(uint64_t block_number) const; }; Json::Value enc_json(GenesisConfig const& obj); diff --git a/libraries/config/include/config/hardfork.hpp b/libraries/config/include/config/hardfork.hpp index 1e1c95c5b6..4357eb9bd7 100644 --- a/libraries/config/include/config/hardfork.hpp +++ b/libraries/config/include/config/hardfork.hpp @@ -5,6 +5,7 @@ #include "common/encoding_rlp.hpp" #include "common/types.hpp" +namespace taraxa { struct Redelegation { taraxa::addr_t validator; taraxa::addr_t delegator; @@ -71,6 +72,17 @@ struct FicusHardforkConfig { Json::Value enc_json(const FicusHardforkConfig& obj); void dec_json(const Json::Value& json, FicusHardforkConfig& obj); +struct CornusHardforkConfig { + uint64_t block_num = -1; + uint32_t delegation_locking_period = 5; // number of blocks + uint64_t dag_gas_limit = 0; + uint64_t pbft_gas_limit = 0; + + HAS_RLP_FIELDS +}; +Json::Value enc_json(const CornusHardforkConfig& obj); +void dec_json(const Json::Value& json, CornusHardforkConfig& obj); + // Keeping it for next HF // struct BambooRedelegation { // taraxa::addr_t validator; @@ -129,8 +141,16 @@ struct HardforksConfig { // Ficus 
hardfork: implementation of pillar chain FicusHardforkConfig ficus_hf; + // Cornus hf - support multiple undelegations from the same validator at the same time + // - change of delegation locking period + // - change gas limit + CornusHardforkConfig cornus_hf; + + bool isOnCornusHardfork(uint64_t block_number) const { return block_number >= cornus_hf.block_num; } + HAS_RLP_FIELDS }; Json::Value enc_json(const HardforksConfig& obj); void dec_json(const Json::Value& json, HardforksConfig& obj); +} // namespace taraxa diff --git a/libraries/config/include/config/network.hpp b/libraries/config/include/config/network.hpp index 49253ca962..e084986be2 100644 --- a/libraries/config/include/config/network.hpp +++ b/libraries/config/include/config/network.hpp @@ -5,6 +5,7 @@ #include #include "common/types.hpp" +#include "libp2p/Common.h" namespace taraxa { @@ -22,6 +23,8 @@ struct ConnectionConfig { // Number of threads dedicated to the rpc calls processing, default = 5 uint16_t threads_num{5}; + uint32_t max_pending_tasks{100}; + void validate() const; }; @@ -53,6 +56,12 @@ struct DdosProtectionConfig { // Max packets queue size, 0 means unlimited uint64_t max_packets_queue_size{0}; + // Time of allowed queue over the limit + std::chrono::milliseconds queue_limit_time{5000}; + + // Time period between disconnecting peers + std::chrono::milliseconds peer_disconnect_interval{5000}; + void validate(uint32_t delegation_delay) const; }; @@ -74,6 +83,7 @@ struct NetworkConfig { bool disable_peer_blacklist = false; uint16_t deep_syncing_threshold = 10; DdosProtectionConfig ddos_protection; + std::unordered_set trusted_nodes; std::optional rpc; std::optional graphql; diff --git a/libraries/config/include/config/state_config.hpp b/libraries/config/include/config/state_config.hpp index 338f28e23c..a529073e26 100644 --- a/libraries/config/include/config/state_config.hpp +++ b/libraries/config/include/config/state_config.hpp @@ -1,7 +1,6 @@ #pragma once #include -#include #include 
"common/encoding_rlp.hpp" #include "common/types.hpp" diff --git a/libraries/config/src/config.cpp b/libraries/config/src/config.cpp index 26c915b210..f91b75f979 100644 --- a/libraries/config/src/config.cpp +++ b/libraries/config/src/config.cpp @@ -4,7 +4,7 @@ #include -#include "common/jsoncpp.hpp" +#include "common/config_exception.hpp" #include "common/thread_pool.hpp" #include "config/config_utils.hpp" @@ -54,8 +54,8 @@ std::vector FullNodeConfig::loadLoggingConfigs(const Json::Value output.target = log_path; output.file_name = (log_path / getConfigDataAsString(o, {"file_name"})).string(); output.format = getConfigDataAsString(o, {"format"}); - output.max_size = getConfigDataAsUInt64(o, {"max_size"}); - output.rotation_size = getConfigDataAsUInt64(o, {"rotation_size"}); + output.max_size = getConfigDataAsUInt(o, {"max_size"}); + output.rotation_size = getConfigDataAsUInt(o, {"rotation_size"}); output.time_based_rotation = getConfigDataAsString(o, {"time_based_rotation"}); } logging.outputs.push_back(output); @@ -108,6 +108,9 @@ FullNodeConfig::FullNodeConfig(const Json::Value &string_or_object, const Json:: genesis = GenesisConfig(); } + propose_dag_gas_limit = getConfigDataAsUInt(root, {"propose_dag_gas_limit"}, true, propose_dag_gas_limit); + propose_pbft_gas_limit = getConfigDataAsUInt(root, {"propose_pbft_gas_limit"}, true, propose_pbft_gas_limit); + is_light_node = getConfigDataAsBoolean(root, {"is_light_node"}, true, is_light_node); const auto min_light_node_history = (genesis.state.dpos.blocks_per_year * kDefaultLightNodeHistoryDays) / 365; light_node_history = getConfigDataAsUInt(root, {"light_node_history"}, true, min_light_node_history); diff --git a/libraries/config/src/config_utils.cpp b/libraries/config/src/config_utils.cpp index bdef058854..a3946d6b4f 100644 --- a/libraries/config/src/config_utils.cpp +++ b/libraries/config/src/config_utils.cpp @@ -1,5 +1,14 @@ #include "config/config_utils.hpp" +#include +#include +#include + +#include + 
+#include "common/config_exception.hpp" +#include "libdevcore/CommonJS.h" + namespace taraxa { std::string getConfigErr(const std::vector &path) { @@ -20,7 +29,7 @@ Json::Value getConfigData(Json::Value root, const std::vector &path } std::string getConfigDataAsString(const Json::Value &root, const std::vector &path, bool optional, - std::string value) { + const std::string &value) { try { Json::Value ret = getConfigData(root, path, optional); if (ret.isNull()) { @@ -36,14 +45,14 @@ std::string getConfigDataAsString(const Json::Value &root, const std::vector &path, bool optional, +uint64_t getConfigDataAsUInt(const Json::Value &root, const std::vector &path, bool optional, uint32_t value) { try { Json::Value ret = getConfigData(root, path, optional); if (ret.isNull()) { return value; } else { - return ret.asUInt(); + return dev::getUInt(ret); } } catch (Json::Exception &e) { if (optional) { @@ -53,14 +62,6 @@ uint32_t getConfigDataAsUInt(const Json::Value &root, const std::vector &path) { - try { - return getConfigData(root, path).asUInt64(); - } catch (Json::Exception &e) { - throw ConfigException(getConfigErr(path) + e.what()); - } -} - bool getConfigDataAsBoolean(const Json::Value &root, const std::vector &path, bool optional, bool value) { try { Json::Value ret = getConfigData(root, path, optional); diff --git a/libraries/config/src/genesis.cpp b/libraries/config/src/genesis.cpp index a48b66c013..3f5649af99 100644 --- a/libraries/config/src/genesis.cpp +++ b/libraries/config/src/genesis.cpp @@ -2,13 +2,10 @@ #include -#include - #include "common/config_exception.hpp" #include "libdevcore/SHA3.h" namespace taraxa { -using std::stringstream; Json::Value enc_json(GasPriceConfig const& obj) { Json::Value json(Json::objectValue); @@ -89,10 +86,10 @@ GenesisConfig::GenesisConfig() { pbft.committee_size = 5; pbft.dag_blocks_size = 100; pbft.ghost_path_move_back = 1; - pbft.gas_limit = 60000000; + pbft.gas_limit = 315000000; // DAG config - dag.gas_limit = 10000000; + 
dag.gas_limit = 315000000; // DPOS config auto& dpos = state.dpos; @@ -132,4 +129,11 @@ bytes GenesisConfig::rlp() const { blk_hash_t GenesisConfig::genesisHash() const { return dev::sha3(rlp()); } +std::pair GenesisConfig::getGasLimits(uint64_t block_number) const { + if (state.hardforks.isOnCornusHardfork(block_number)) { + return {state.hardforks.cornus_hf.dag_gas_limit, state.hardforks.cornus_hf.pbft_gas_limit}; + } + return {dag.gas_limit, pbft.gas_limit}; +} + } // namespace taraxa diff --git a/libraries/config/src/hardfork.cpp b/libraries/config/src/hardfork.cpp index be7768564b..380149a3a3 100644 --- a/libraries/config/src/hardfork.cpp +++ b/libraries/config/src/hardfork.cpp @@ -2,6 +2,7 @@ #include "common/config_exception.hpp" +namespace taraxa { Json::Value enc_json(const Redelegation& obj) { Json::Value json(Json::objectValue); json["validator"] = dev::toJS(obj.validator); @@ -137,6 +138,24 @@ RLP_FIELDS_DEFINE(FicusHardforkConfig, block_num, pillar_blocks_interval, bridge // } // RLP_FIELDS_DEFINE(BambooHardfork, block_num, redelegations) +Json::Value enc_json(const CornusHardforkConfig& obj) { + Json::Value json(Json::objectValue); + json["block_num"] = dev::toJS(obj.block_num); + json["delegation_locking_period"] = dev::toJS(obj.delegation_locking_period); + json["dag_gas_limit"] = dev::toJS(obj.dag_gas_limit); + json["pbft_gas_limit"] = dev::toJS(obj.pbft_gas_limit); + return json; +} + +void dec_json(const Json::Value& json, CornusHardforkConfig& obj) { + obj.block_num = json["block_num"].isUInt64() ? 
dev::getUInt(json["block_num"]) : uint64_t(-1); + obj.delegation_locking_period = dev::getUInt(json["delegation_locking_period"]); + obj.dag_gas_limit = dev::getUInt(json["dag_gas_limit"]); + obj.pbft_gas_limit = dev::getUInt(json["pbft_gas_limit"]); +} + +RLP_FIELDS_DEFINE(CornusHardforkConfig, block_num, delegation_locking_period, dag_gas_limit, pbft_gas_limit) + Json::Value enc_json(const HardforksConfig& obj) { Json::Value json(Json::objectValue); json["fix_redelegate_block_num"] = dev::toJS(obj.fix_redelegate_block_num); @@ -157,6 +176,7 @@ Json::Value enc_json(const HardforksConfig& obj) { json["aspen_hf"] = enc_json(obj.aspen_hf); json["ficus_hf"] = enc_json(obj.ficus_hf); // json["bamboo_hf"] = enc_json(obj.bamboo_hf); + json["cornus_hf"] = enc_json(obj.cornus_hf); return json; } @@ -188,7 +208,9 @@ void dec_json(const Json::Value& json, HardforksConfig& obj) { dec_json(json["aspen_hf"], obj.aspen_hf); dec_json(json["ficus_hf"], obj.ficus_hf); // dec_json(json["bamboo_hf"], obj.bamboo_hf); + dec_json(json["cornus_hf"], obj.cornus_hf); } RLP_FIELDS_DEFINE(HardforksConfig, fix_redelegate_block_num, redelegations, rewards_distribution_frequency, magnolia_hf, - phalaenopsis_hf_block_num, fix_claim_all_block_num, aspen_hf, ficus_hf) \ No newline at end of file + phalaenopsis_hf_block_num, fix_claim_all_block_num, aspen_hf, ficus_hf, cornus_hf) +} // namespace taraxa \ No newline at end of file diff --git a/libraries/config/src/network.cpp b/libraries/config/src/network.cpp index 0509e3a68c..59701f0d69 100644 --- a/libraries/config/src/network.cpp +++ b/libraries/config/src/network.cpp @@ -1,5 +1,6 @@ #include "config/network.hpp" +#include "common/config_exception.hpp" #include "config/config_utils.hpp" namespace taraxa { @@ -32,6 +33,11 @@ void dec_json(const Json::Value &json, ConnectionConfig &config) { config.ws_port = ws_port.asUInt(); } + // max pending tasks + if (auto max_pending_tasks = getConfigData(json, {"max_pending_tasks"}, true); 
!max_pending_tasks.isNull()) { + config.max_pending_tasks = max_pending_tasks.asUInt(); + } + // number of threads processing rpc calls if (auto threads_num = getConfigData(json, {"threads_num"}, true); !threads_num.isNull()) { config.threads_num = threads_num.asUInt(); @@ -69,10 +75,9 @@ DdosProtectionConfig dec_ddos_protection_config_json(const Json::Value &json) { ddos_protection.packets_stats_time_period_ms = std::chrono::milliseconds{getConfigDataAsUInt(json, {"packets_stats_time_period_ms"})}; ddos_protection.peer_max_packets_processing_time_us = - std::chrono::microseconds{getConfigDataAsUInt64(json, {"peer_max_packets_processing_time_us"})}; - ddos_protection.peer_max_packets_queue_size_limit = - getConfigDataAsUInt64(json, {"peer_max_packets_queue_size_limit"}); - ddos_protection.max_packets_queue_size = getConfigDataAsUInt64(json, {"max_packets_queue_size"}); + std::chrono::microseconds{getConfigDataAsUInt(json, {"peer_max_packets_processing_time_us"})}; + ddos_protection.peer_max_packets_queue_size_limit = getConfigDataAsUInt(json, {"peer_max_packets_queue_size_limit"}); + ddos_protection.max_packets_queue_size = getConfigDataAsUInt(json, {"max_packets_queue_size"}); return ddos_protection; } @@ -127,6 +132,12 @@ void dec_json(const Json::Value &json, NetworkConfig &network) { network.listen_port = getConfigDataAsUInt(json, {"listen_port"}); network.transaction_interval_ms = getConfigDataAsUInt(json, {"transaction_interval_ms"}); network.ideal_peer_count = getConfigDataAsUInt(json, {"ideal_peer_count"}); + Json::Value priority_nodes = json["priority_nodes"]; + if (!priority_nodes.isNull()) { + for (const auto &item : priority_nodes) { + network.trusted_nodes.insert(dev::p2p::NodeID(item.asString())); + } + } network.max_peer_count = getConfigDataAsUInt(json, {"max_peer_count"}); network.sync_level_size = getConfigDataAsUInt(json, {"sync_level_size"}); network.packets_processing_threads = getConfigDataAsUInt(json, {"packets_processing_threads"}); diff 
--git a/libraries/config/src/state_config.cpp b/libraries/config/src/state_config.cpp index e938d3d000..4abccf6eb1 100644 --- a/libraries/config/src/state_config.cpp +++ b/libraries/config/src/state_config.cpp @@ -2,8 +2,6 @@ #include -#include - #include "common/vrf_wrapper.hpp" namespace taraxa::state_api { diff --git a/libraries/core_libs/CMakeLists.txt b/libraries/core_libs/CMakeLists.txt index bd00ca099d..146a6cf48f 100644 --- a/libraries/core_libs/CMakeLists.txt +++ b/libraries/core_libs/CMakeLists.txt @@ -56,7 +56,7 @@ target_link_libraries(core_libs PUBLIC p2p metrics Jsonrpccpp - rocksdb + CONAN_PKG::rocksdb # GraphQL cppgraphqlgen::graphqlservice diff --git a/libraries/core_libs/consensus/include/dag/dag.hpp b/libraries/core_libs/consensus/include/dag/dag.hpp index 9f10ac9fa1..c414a03d3f 100644 --- a/libraries/core_libs/consensus/include/dag/dag.hpp +++ b/libraries/core_libs/consensus/include/dag/dag.hpp @@ -1,6 +1,5 @@ #pragma once -#include #include #include #include @@ -12,15 +11,11 @@ #include #include #include -#include -#include -#include -#include #include #include "common/types.hpp" -#include "common/util.hpp" -#include "dag/dag_block.hpp" +#include "logger/logger.hpp" + namespace taraxa { /** @addtogroup DAG diff --git a/libraries/core_libs/consensus/include/dag/dag_block_proposer.hpp b/libraries/core_libs/consensus/include/dag/dag_block_proposer.hpp index 0b79a617ed..9e1fea8fc6 100644 --- a/libraries/core_libs/consensus/include/dag/dag_block_proposer.hpp +++ b/libraries/core_libs/consensus/include/dag/dag_block_proposer.hpp @@ -1,12 +1,9 @@ #pragma once #include -#include #include #include -#include "boost/thread.hpp" -#include "config/config.hpp" #include "dag/dag_block.hpp" #include "logger/logger.hpp" #include "network/network.hpp" @@ -19,6 +16,7 @@ namespace taraxa { class TransactionManager; class KeyManager; class DagManager; +struct FullNodeConfig; namespace final_chain { class FinalChain; @@ -35,11 +33,9 @@ class FinalChain; */ class 
DagBlockProposer { public: - DagBlockProposer(const DagBlockProposerConfig& bp_config, std::shared_ptr dag_mgr, + DagBlockProposer(const FullNodeConfig& config, std::shared_ptr dag_mgr, std::shared_ptr trx_mgr, std::shared_ptr final_chain, - std::shared_ptr db, std::shared_ptr key_manager, addr_t node_addr, - secret_t node_sk, vrf_wrapper::vrf_sk_t vrf_sk, uint64_t pbft_gas_limit, uint64_t dag_gas_limit, - const state_api::Config& state_config); + std::shared_ptr db, std::shared_ptr key_manager); ~DagBlockProposer() { stop(); } DagBlockProposer(const DagBlockProposer&) = delete; DagBlockProposer(DagBlockProposer&&) = delete; @@ -94,8 +90,8 @@ class DagBlockProposer { * @param estimations transactions gas estimation * @param vdf vdf with correct difficulty calculation */ - DagBlock createDagBlock(DagFrontier&& frontier, level_t level, const SharedTransactions& trxs, - std::vector&& estimations, VdfSortition&& vdf) const; + std::shared_ptr createDagBlock(DagFrontier&& frontier, level_t level, const SharedTransactions& trxs, + std::vector&& estimations, VdfSortition&& vdf) const; /** * @brief Gets transactions to include in the block - sharding not supported yet diff --git a/libraries/core_libs/consensus/include/dag/dag_manager.hpp b/libraries/core_libs/consensus/include/dag/dag_manager.hpp index 30d025ba1a..634135ad17 100644 --- a/libraries/core_libs/consensus/include/dag/dag_manager.hpp +++ b/libraries/core_libs/consensus/include/dag/dag_manager.hpp @@ -6,6 +6,7 @@ #include "sortition_params_manager.hpp" #include "storage/storage.hpp" #include "transaction/transaction_manager.hpp" + namespace taraxa { /** @addtogroup DAG @@ -15,6 +16,7 @@ class Network; class DagBuffer; class FullNode; class KeyManager; +struct DagConfig; /** * @brief DagManager class contains in memory representation of part of the DAG that is not yet finalized in a pbft @@ -44,13 +46,9 @@ class DagManager : public std::enable_shared_from_this { MissingTip }; - explicit DagManager(const DagBlock 
&dag_genesis_block, addr_t node_addr, const SortitionConfig &sortition_config, - const DagConfig &dag_config, std::shared_ptr trx_mgr, - std::shared_ptr pbft_chain, std::shared_ptr final_chain, - std::shared_ptr db, std::shared_ptr key_manager, uint64_t pbft_gas_limit, - const state_api::Config &state_config, bool is_light_node = false, - uint64_t light_node_history = 0, uint32_t max_levels_per_period = kMaxLevelsPerPeriod, - uint32_t dag_expiry_limit = kDagExpiryLevelLimit); + explicit DagManager(const FullNodeConfig &config, addr_t node_addr, std::shared_ptr trx_mgr, + std::shared_ptr pbft_chain, std::shared_ptr final_chain, + std::shared_ptr db, std::shared_ptr key_manager); DagManager(const DagManager &) = delete; DagManager(DagManager &&) = delete; @@ -81,14 +79,15 @@ class DagManager : public std::enable_shared_from_this { * @return verification result and all the transactions which are part of the block */ std::pair verifyBlock( - const DagBlock &blk, const std::unordered_map> &trxs = {}); + const std::shared_ptr &blk, + const std::unordered_map> &trxs = {}); /** * @brief Checks if block pivot and tips are in DAG * @param blk Block to check * @return true if all pivot and tips are in the DAG, false if some is missing with the hash of missing tips/pivot */ - std::pair> pivotAndTipsAvailable(DagBlock const &blk); + std::pair> pivotAndTipsAvailable(const std::shared_ptr &blk); /** * @brief adds verified DAG block in the DAG @@ -97,8 +96,8 @@ class DagManager : public std::enable_shared_from_this { * @param save if true save block and transactions to database * @return true if block added successfully, false with the hash of missing tips/pivot */ - std::pair> addDagBlock(DagBlock &&blk, SharedTransactions &&trxs = {}, - bool proposed = false, + std::pair> addDagBlock(const std::shared_ptr &blk, + SharedTransactions &&trxs = {}, bool proposed = false, bool save = true); // insert to buffer if fail /** @@ -123,9 +122,6 @@ class DagManager : public 
std::enable_shared_from_this { */ uint setDagBlockOrder(blk_hash_t const &anchor, PbftPeriod period, vec_blk_t const &dag_order); - uint64_t getLightNodeHistory() const { return light_node_history_; } - bool isLightNode() const { return is_light_node_; } - std::optional>> getLatestPivotAndTips() const; /** @@ -189,7 +185,9 @@ class DagManager : public std::enable_shared_from_this { */ std::pair getNonFinalizedBlocksSize() const; - util::Event const block_verified_{}; + uint32_t getNonFinalizedBlocksMinDifficulty() const; + + util::Event> const block_verified_{}; /** * @brief Retrieves Dag Manager mutex, only to be used when finalizing pbft block @@ -237,7 +235,7 @@ class DagManager : public std::enable_shared_from_this { * @brief Clears light node history * */ - void clearLightNodeHistory(); + void clearLightNodeHistory(uint64_t light_node_history); private: void recoverDag(); @@ -263,12 +261,11 @@ class DagManager : public std::enable_shared_from_this { blk_hash_t old_anchor_; // anchor of the second to last period PbftPeriod period_; // last period std::map> non_finalized_blks_; + uint32_t non_finalized_blks_min_difficulty_ = UINT32_MAX; DagFrontier frontier_; SortitionParamsManager sortition_params_manager_; - const DagConfig dag_config_; + const DagConfig &dag_config_; const std::shared_ptr genesis_block_; - const bool is_light_node_ = false; - const uint64_t light_node_history_ = 0; const uint32_t max_levels_per_period_; const uint32_t dag_expiry_limit_; // Any non finalized dag block with a level smaller by // dag_expiry_limit_ than the current period anchor level is considered @@ -280,10 +277,9 @@ class DagManager : public std::enable_shared_from_this { const uint32_t cache_max_size_ = 10000; const uint32_t cache_delete_step_ = 100; - ExpirationCacheMap seen_blocks_; - std::shared_ptr final_chain_; - const uint64_t kPbftGasLimit; - const HardforksConfig kHardforks; + ExpirationCacheMap> seen_blocks_; + std::shared_ptr final_chain_; + const GenesisConfig 
kGenesis; const uint64_t kValidatorMaxVote; LOG_OBJECTS_DEFINE diff --git a/libraries/core_libs/consensus/include/dag/sortition_params_manager.hpp b/libraries/core_libs/consensus/include/dag/sortition_params_manager.hpp index 8039bc3d17..244897aea3 100644 --- a/libraries/core_libs/consensus/include/dag/sortition_params_manager.hpp +++ b/libraries/core_libs/consensus/include/dag/sortition_params_manager.hpp @@ -55,7 +55,7 @@ class SortitionParamsManager { * @param batch DB batch in which all changes will be added * @param non_empty_pbft_chain_size PBFT chain size excluding pbft blocks with null anchor */ - void pbftBlockPushed(const PeriodData& block, DbStorage::Batch& batch, PbftPeriod non_empty_pbft_chain_size); + void pbftBlockPushed(const PeriodData& block, Batch& batch, PbftPeriod non_empty_pbft_chain_size); /** * Calculate average DAG efficiency from dag_efficiencies_. Used at the end of interval. diff --git a/libraries/core_libs/consensus/include/final_chain/cache.hpp b/libraries/core_libs/consensus/include/final_chain/cache.hpp index 740c41b490..477c90b63d 100644 --- a/libraries/core_libs/consensus/include/final_chain/cache.hpp +++ b/libraries/core_libs/consensus/include/final_chain/cache.hpp @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include #include diff --git a/libraries/core_libs/consensus/include/final_chain/data.hpp b/libraries/core_libs/consensus/include/final_chain/data.hpp index 2ec581b685..5e71c3c4c9 100644 --- a/libraries/core_libs/consensus/include/final_chain/data.hpp +++ b/libraries/core_libs/consensus/include/final_chain/data.hpp @@ -5,8 +5,6 @@ #include #include -#include - #include "common/encoding_rlp.hpp" #include "common/types.hpp" #include "transaction/transaction.hpp" @@ -25,35 +23,49 @@ using LogBloom = dev::h2048; using LogBlooms = std::vector; using Nonce = dev::h64; -struct BlockHeader { - h256 hash; +struct BlockHeaderData { h256 parent_hash; h256 state_root; h256 transactions_root; h256 receipts_root; LogBloom 
log_bloom; - EthBlockNumber number = 0; - uint64_t gas_limit = 0; uint64_t gas_used = 0; - bytes extra_data; - uint64_t timestamp = 0; - Address author; u256 total_reward; uint64_t size = 0; + dev::bytes serializeForDB() const; + HAS_RLP_FIELDS +}; + +struct BlockHeader : BlockHeaderData { + BlockHeader() = default; + BlockHeader(std::string&& raw_header_data); + BlockHeader(std::string&& raw_header_data, const PbftBlock& pbft, uint64_t gas_limit); - static const h256& uncles_hash(); + void setFromPbft(const PbftBlock& pbft); + + static h256 const& unclesHash(); static const Nonce& nonce(); static const u256& difficulty(); - static const h256& mix_hash(); + static h256 const& mixHash(); + + static std::shared_ptr fromRLP(const dev::RLP& rlp); + + void ethereumRlp(dev::RLPStream& encoding) const; + dev::bytes ethereumRlp() const; - static std::shared_ptr from_rlp(const dev::RLP& rlp); + h256 hash; + Address author; + uint64_t gas_limit = 0; + uint64_t timestamp = 0; + EthBlockNumber number = 0; + bytes extra_data; - void ethereum_rlp(dev::RLPStream& encoding) const; + HAS_RLP_FIELDS }; static constexpr auto c_bloomIndexSize = 16; diff --git a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp index eeaef817a2..a98c7c705f 100644 --- a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp +++ b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp @@ -5,9 +5,11 @@ #include "common/event.hpp" #include "common/types.hpp" #include "config/config.hpp" +#include "config/state_config.hpp" +#include "final_chain/cache.hpp" #include "final_chain/data.hpp" #include "final_chain/state_api.hpp" -#include "final_chain/state_api_data.hpp" +#include "rewards/rewards_stats.hpp" #include "storage/storage.hpp" namespace taraxa::final_chain { @@ -16,8 +18,6 @@ namespace taraxa::final_chain { * @{ */ -enum class DBMetaKeys { LAST_NUMBER = 1 }; - /** * @brief main responsibility is 
blocks execution in EVM, getting data from EVM state * @@ -34,16 +34,16 @@ class FinalChain { decltype(block_finalized_emitter_)::Subscriber const& block_finalized_ = block_finalized_emitter_; decltype(block_applying_emitter_)::Subscriber const& block_applying_ = block_applying_emitter_; - FinalChain() = default; - virtual ~FinalChain() = default; + ~FinalChain() = default; + FinalChain(const std::shared_ptr& db, const taraxa::FullNodeConfig& config, const addr_t& node_addr); FinalChain(const FinalChain&) = delete; FinalChain(FinalChain&&) = delete; FinalChain& operator=(const FinalChain&) = delete; FinalChain& operator=(FinalChain&&) = delete; - virtual void stop() = 0; + void stop(); - virtual EthBlockNumber delegation_delay() const = 0; + EthBlockNumber delegationDelay() const; /** * @brief Method which finalizes a block and executes it in EVM @@ -53,9 +53,9 @@ class FinalChain { * @param precommit_ext * @return finalization result */ - virtual std::future> finalize( - PeriodData&& period_data, std::vector&& finalized_dag_blk_hashes, - std::shared_ptr&& anchor = nullptr) = 0; + std::future> finalize(PeriodData&& period_data, + std::vector&& finalized_dag_blk_hashes, + std::shared_ptr&& anchor = nullptr); /** * @brief Method to get block header by block number @@ -63,13 +63,13 @@ class FinalChain { * @param n block number of header to get. 
If not specified then it returns latest * @return BlockHeader */ - virtual std::shared_ptr block_header(std::optional n = {}) const = 0; + std::shared_ptr blockHeader(std::optional n = {}) const; /** * @brief Method to get last block number(chain size) * @return EthBlockNumber */ - virtual EthBlockNumber last_block_number() const = 0; + EthBlockNumber lastBlockNumber() const; /** * @brief Method to get block number by hash @@ -77,7 +77,7 @@ class FinalChain { * @param h block hash * @return EthBlockNumber */ - virtual std::optional block_number(h256 const& h) const = 0; + std::optional blockNumber(h256 const& h) const; /** * @brief Method to get block hash by block number @@ -85,48 +85,56 @@ class FinalChain { * @param n EthBlockNumber * @return BlockHash h256 */ - virtual std::optional block_hash(std::optional n = {}) const = 0; + std::optional blockHash(std::optional n = {}) const; + + /** + * @brief Method to get the final chain hash by block number + * + * @param n block number + * @return std::optional final chain hash or nullopt + */ + std::optional finalChainHash(EthBlockNumber n) const; /** * @brief Needed if we are changing params with hardfork and it affects Go part of code. 
For example DPOS contract * @param new_config state_api::Config */ - virtual void update_state_config(const state_api::Config& new_config) = 0; + void updateStateConfig(const state_api::Config& new_config); /** * @brief Method to get all transaction hashes from the block * @param n EthBlockNumber * @return TransactionHashes list if transaction hashes */ - virtual std::shared_ptr transaction_hashes(std::optional n = {}) const = 0; + std::shared_ptr transactionHashes(std::optional n = {}) const; /** * @brief Method to get all transactions from the block * @param n EthBlockNumber * @return SharedTransactions vector of shared_ptrs to Transaction */ - virtual const SharedTransactions transactions(std::optional n = {}) const = 0; + const SharedTransactions transactions(std::optional n = {}) const; /** * @brief Method to get transaction location by hash * @param trx_hash hash of transaction to get location for * @return std::optional transaction location or nullopt */ - virtual std::optional transaction_location(h256 const& trx_hash) const = 0; + std::optional transactionLocation(h256 const& trx_hash) const; /** * @brief Method to get transaction receipt by hash * @param _transactionHash hash of transaction to get receipt for * @return std::optional transaction receipt or nullopt */ - virtual std::optional transaction_receipt(h256 const& _transactionHash) const = 0; + std::optional transactionReceipt(h256 const& _transactionHash) const; /** * @brief Method to get transactions count in block * @param n block number * @return count of transactions in block */ - virtual uint64_t transactionCount(std::optional n = {}) const = 0; + uint64_t transactionCount(std::optional n = {}) const; /** * @brief Method used to search for contract logs with bloom filter @@ -135,8 +143,7 @@ class FinalChain { * @param to EthBlockNumber block to end search * @return block that matches specified bloom filter */ - virtual std::vector withBlockBloom(LogBloom const& b, EthBlockNumber from, - 
EthBlockNumber to) const = 0; + std::vector withBlockBloom(LogBloom const& b, EthBlockNumber from, EthBlockNumber to) const; /** * @brief Method to get account information @@ -145,8 +152,7 @@ class FinalChain { * @param blk_n number of block we are getting state from * @return std::optional account object or nullopt if account wasn't found */ - virtual std::optional get_account(addr_t const& addr, - std::optional blk_n = {}) const = 0; + std::optional getAccount(addr_t const& addr, std::optional blk_n = {}) const; /** * @brief Returns the value from a storage position at a given address. @@ -155,15 +161,14 @@ class FinalChain { * @param blk_n number of block we are getting state from * @return the value at this storage position */ - virtual h256 get_account_storage(addr_t const& addr, u256 const& key, - std::optional blk_n = {}) const = 0; + h256 getAccountStorage(addr_t const& addr, u256 const& key, std::optional blk_n = {}) const; /** * @brief Returns code at a given address. * @param addr account address * @param blk_n number of block we are getting state from * @return code at a given address. */ - virtual bytes get_code(addr_t const& addr, std::optional blk_n = {}) const = 0; + bytes getCode(addr_t const& addr, std::optional blk_n = {}) const; /** * @brief Executes a new message call immediately without creating a transaction on the block chain. That means that @@ -172,8 +177,7 @@ class FinalChain { * @param blk_n EthBlockNumber number of block we are getting state from * @return state_api::ExecutionResult */ - virtual state_api::ExecutionResult call(state_api::EVMTransaction const& trx, - std::optional blk_n = {}) const = 0; + state_api::ExecutionResult call(state_api::EVMTransaction const& trx, std::optional blk_n = {}) const; /** * @brief Trace execution of a new message call immediately without creating a transactions on the block chain. 
That @@ -182,15 +186,15 @@ class FinalChain { * @param blk_n EthBlockNumber number of block we are getting state from * @return std::string */ - virtual std::string trace(std::vector trx, EthBlockNumber blk_n, - std::optional params = {}) const = 0; + std::string trace(std::vector state_trxs, std::vector trxs, + EthBlockNumber blk_n, std::optional params = {}) const; /** * @brief total count of eligible votes are in DPOS precompiled contract * @param blk_num EthBlockNumber number of block we are getting state from * @return total count of eligible votes */ - virtual uint64_t dpos_eligible_total_vote_count(EthBlockNumber blk_num) const = 0; + uint64_t dposEligibleTotalVoteCount(EthBlockNumber blk_num) const; /** * @brief total count of eligible votes account has in DPOS precompiled contract @@ -198,7 +202,7 @@ class FinalChain { * @param addr account address * @return address eligible votes count */ - virtual uint64_t dpos_eligible_vote_count(EthBlockNumber blk_num, addr_t const& addr) const = 0; + uint64_t dposEligibleVoteCount(EthBlockNumber blk_num, addr_t const& addr) const; /** * @brief method to check if address have enough votes to participate in consensus @@ -206,7 +210,7 @@ class FinalChain { * @param addr account address * @return is address eligible */ - virtual bool dpos_is_eligible(EthBlockNumber blk_num, addr_t const& addr) const = 0; + bool dposIsEligible(EthBlockNumber blk_num, addr_t const& addr) const; /** * @brief Get the vrf key object from DPOS state @@ -214,66 +218,122 @@ class FinalChain { * @param blk_n number of block we are getting state from * @return vrf_wrapper::vrf_pk_t */ - virtual vrf_wrapper::vrf_pk_t dpos_get_vrf_key(EthBlockNumber blk_n, const addr_t& addr) const = 0; + vrf_wrapper::vrf_pk_t dposGetVrfKey(EthBlockNumber blk_n, const addr_t& addr) const; /** * @brief Prune state db for all blocks older than blk_n * @param blk_n number of block we are getting state from */ - virtual void prune(EthBlockNumber blk_n) = 0; + void 
prune(EthBlockNumber blk_n); /** * @brief Wait until next block is finalized */ - virtual void wait_for_finalized() = 0; + void waitForFinalized(); - virtual std::vector dpos_validators_total_stakes(EthBlockNumber blk_num) const = 0; + std::vector dposValidatorsTotalStakes(EthBlockNumber blk_num) const; - virtual uint256_t dpos_total_amount_delegated(EthBlockNumber blk_num) const = 0; + uint256_t dposTotalAmountDelegated(EthBlockNumber blk_num) const; /** * @param blk_num * @return vector of validators vote counts for provided blk_num */ - virtual std::vector dpos_validators_vote_counts(EthBlockNumber blk_num) const = 0; + std::vector dposValidatorsVoteCounts(EthBlockNumber blk_num) const; /** * @param blk_num * @return yield */ - virtual uint64_t dpos_yield(EthBlockNumber blk_num) const = 0; + uint64_t dposYield(EthBlockNumber blk_num) const; /** * @param blk_num * @return total supply */ - virtual u256 dpos_total_supply(EthBlockNumber blk_num) const = 0; + u256 dposTotalSupply(EthBlockNumber blk_num) const; /** * @param blk_num * @return bridge root */ - virtual h256 get_bridge_root(EthBlockNumber blk_num) const = 0; + h256 getBridgeRoot(EthBlockNumber blk_num) const; /** * @param blk_num * @return bridge epoch */ - virtual h256 get_bridge_epoch(EthBlockNumber blk_num) const = 0; - // TODO move out of here: + h256 getBridgeEpoch(EthBlockNumber blk_num) const; - std::pair getBalance(addr_t const& addr) const { - if (auto acc = get_account(addr)) { - return {acc->balance, true}; - } - return {0, false}; - } + // TODO move out of here: + std::pair getBalance(addr_t const& addr) const; + std::shared_ptr finalize_(PeriodData&& new_blk, + std::vector&& finalized_dag_blk_hashes, + std::shared_ptr&& anchor); + + const SharedTransactions getTransactions(std::optional n = {}) const; + + private: + std::shared_ptr getTransactionHashes(std::optional n = {}) const; + std::shared_ptr getBlockHeader(EthBlockNumber n) const; + std::optional getBlockHash(EthBlockNumber n) const; 
+ EthBlockNumber lastIfAbsent(const std::optional& client_blk_n) const; + static state_api::EVMTransaction toEvmTransaction(const SharedTransaction& trx); + static void appendEvmTransactions(std::vector& evm_trxs, const SharedTransactions& trxs); + BlocksBlooms blockBlooms(const h256& chunk_id) const; + static h256 blockBloomsChunkId(EthBlockNumber level, EthBlockNumber index); + std::vector withBlockBloom(const LogBloom& b, EthBlockNumber from, EthBlockNumber to, + EthBlockNumber level, EthBlockNumber index) const; + bool isNeedToFinalize(EthBlockNumber blk_num) const; + + SharedTransaction makeBridgeFinalizationTransaction(); + std::vector makeSystemTransactions(PbftPeriod blk_num); + + std::shared_ptr makeGenesisHeader(std::string&& raw_header) const; + std::shared_ptr makeGenesisHeader(const h256& state_root) const; + + std::shared_ptr appendBlock(Batch& batch, const PbftBlock& pbft_blk, const h256& state_root, + u256 total_reward, const SharedTransactions& transactions = {}, + const TransactionReceipts& receipts = {}); + std::shared_ptr appendBlock(Batch& batch, std::shared_ptr header, + const SharedTransactions& transactions = {}, + const TransactionReceipts& receipts = {}); + + private: + std::shared_ptr db_; + const uint64_t kBlockGasLimit; + StateAPI state_api_; + const bool kLightNode = false; + const uint32_t kMaxLevelsPerPeriod; + rewards::Stats rewards_; + + // It is not prepared to use more than 1 thread.
Examine it if you want to change threads count + boost::asio::thread_pool executor_thread_{1}; + + std::atomic num_executed_dag_blk_ = 0; + std::atomic num_executed_trx_ = 0; + + EthBlockNumber delegation_delay_; + + ValueByBlockCache> block_headers_cache_; + ValueByBlockCache> block_hashes_cache_; + ValueByBlockCache transactions_cache_; + ValueByBlockCache> transaction_hashes_cache_; + MapByBlockCache> accounts_cache_; + + ValueByBlockCache total_vote_count_cache_; + MapByBlockCache dpos_vote_count_cache_; + MapByBlockCache dpos_is_eligible_cache_; + + std::condition_variable finalized_cv_; + std::mutex finalized_mtx_; + + std::atomic last_block_number_; + + const FullNodeConfig& kConfig; + LOG_OBJECTS_DEFINE }; /** @} */ } // namespace taraxa::final_chain - -namespace taraxa { -using final_chain::FinalChain; -} // namespace taraxa diff --git a/libraries/core_libs/consensus/include/final_chain/final_chain_impl.hpp b/libraries/core_libs/consensus/include/final_chain/final_chain_impl.hpp deleted file mode 100644 index fc462ab093..0000000000 --- a/libraries/core_libs/consensus/include/final_chain/final_chain_impl.hpp +++ /dev/null @@ -1,109 +0,0 @@ -#pragma once - -#include "final_chain/cache.hpp" -#include "final_chain/final_chain.hpp" -#include "rewards/rewards_stats.hpp" - -namespace taraxa::final_chain { - -class FinalChainImpl final : public FinalChain { - std::shared_ptr db_; - const uint64_t kBlockGasLimit; - StateAPI state_api_; - const bool kLightNode = false; - const uint32_t kMaxLevelsPerPeriod; - rewards::Stats rewards_; - - // It is not prepared to use more then 1 thread. 
Examine it if you want to change threads count - boost::asio::thread_pool executor_thread_{1}; - - std::atomic num_executed_dag_blk_ = 0; - std::atomic num_executed_trx_ = 0; - - EthBlockNumber delegation_delay_; - - ValueByBlockCache> block_headers_cache_; - ValueByBlockCache> block_hashes_cache_; - ValueByBlockCache transactions_cache_; - ValueByBlockCache> transaction_hashes_cache_; - MapByBlockCache> accounts_cache_; - - ValueByBlockCache total_vote_count_cache_; - MapByBlockCache dpos_vote_count_cache_; - MapByBlockCache dpos_is_eligible_cache_; - - std::condition_variable finalized_cv_; - std::mutex finalized_mtx_; - - std::atomic last_block_number_; - - const HardforksConfig& kHardforksConfig; - LOG_OBJECTS_DEFINE - - public: - FinalChainImpl(const std::shared_ptr& db, const taraxa::FullNodeConfig& config, const addr_t& node_addr); - - void stop() override; - std::future> finalize( - PeriodData&& new_blk, std::vector&& finalized_dag_blk_hashes, - std::shared_ptr&& anchor = nullptr) override; - EthBlockNumber delegation_delay() const override; - SharedTransaction make_bridge_finalization_transaction(); - bool isNeedToFinalize(EthBlockNumber blk_num) const; - std::vector makeSystemTransactions(PbftPeriod blk_num); - std::shared_ptr finalize_(PeriodData&& new_blk, - std::vector&& finalized_dag_blk_hashes, - std::shared_ptr&& anchor); - void prune(EthBlockNumber blk_n) override; - std::shared_ptr append_block(DB::Batch& batch, const addr_t& author, uint64_t timestamp, - uint64_t gas_limit, const h256& state_root, u256 total_reward, - const SharedTransactions& transactions = {}, - const TransactionReceipts& receipts = {}, const bytes& extra_data = {}); - EthBlockNumber last_block_number() const override; - std::optional block_number(const h256& h) const override; - std::optional block_hash(std::optional n = {}) const override; - std::shared_ptr block_header(std::optional n = {}) const override; - std::optional transaction_location(const h256& trx_hash) const 
override; - std::optional transaction_receipt(const h256& trx_h) const override; - uint64_t transactionCount(std::optional n = {}) const override; - std::shared_ptr transaction_hashes(std::optional n = {}) const override; - const SharedTransactions transactions(std::optional n = {}) const override; - std::vector withBlockBloom(const LogBloom& b, EthBlockNumber from, EthBlockNumber to) const override; - std::optional get_account(const addr_t& addr, - std::optional blk_n = {}) const override; - void update_state_config(const state_api::Config& new_config) override; - h256 get_account_storage(const addr_t& addr, const u256& key, - std::optional blk_n = {}) const override; - bytes get_code(const addr_t& addr, std::optional blk_n = {}) const override; - state_api::ExecutionResult call(const state_api::EVMTransaction& trx, - std::optional blk_n = {}) const override; - std::string trace(std::vector trxs, EthBlockNumber blk_n, - std::optional params = {}) const override; - uint64_t dpos_eligible_total_vote_count(EthBlockNumber blk_num) const override; - uint64_t dpos_eligible_vote_count(EthBlockNumber blk_num, const addr_t& addr) const override; - bool dpos_is_eligible(EthBlockNumber blk_num, const addr_t& addr) const override; - vrf_wrapper::vrf_pk_t dpos_get_vrf_key(EthBlockNumber blk_n, const addr_t& addr) const override; - std::vector dpos_validators_total_stakes(EthBlockNumber blk_num) const override; - virtual uint256_t dpos_total_amount_delegated(EthBlockNumber blk_num) const override; - std::vector dpos_validators_vote_counts(EthBlockNumber blk_num) const override; - void wait_for_finalized() override; - uint64_t dpos_yield(EthBlockNumber blk_num) const override; - u256 dpos_total_supply(EthBlockNumber blk_num) const override; - h256 get_bridge_root(EthBlockNumber blk_num) const override; - h256 get_bridge_epoch(EthBlockNumber blk_num) const override; - - private: - std::shared_ptr get_transaction_hashes(std::optional n = {}) const; - const SharedTransactions 
get_transactions(std::optional n = {}) const; - std::shared_ptr get_block_header(EthBlockNumber n) const; - std::optional get_block_hash(EthBlockNumber n) const; - EthBlockNumber last_if_absent(const std::optional& client_blk_n) const; - static state_api::EVMTransaction to_evm_transaction(const SharedTransaction& trx); - static void append_evm_transactions(std::vector& evm_trxs, const SharedTransactions& trxs); - BlocksBlooms block_blooms(const h256& chunk_id) const; - static h256 block_blooms_chunk_id(EthBlockNumber level, EthBlockNumber index); - std::vector withBlockBloom(const LogBloom& b, EthBlockNumber from, EthBlockNumber to, - EthBlockNumber level, EthBlockNumber index) const; -}; - -} // namespace taraxa::final_chain \ No newline at end of file diff --git a/libraries/core_libs/consensus/include/final_chain/state_api.hpp b/libraries/core_libs/consensus/include/final_chain/state_api.hpp index 3075825abb..291fce35af 100644 --- a/libraries/core_libs/consensus/include/final_chain/state_api.hpp +++ b/libraries/core_libs/consensus/include/final_chain/state_api.hpp @@ -6,10 +6,13 @@ #include "final_chain/state_api_data.hpp" #include "rewards/block_stats.hpp" -#include "storage/storage.hpp" namespace taraxa::state_api { +struct Config; +struct Opts; +struct OptsDB; + /** @addtogroup FinalChain * @{ */ @@ -39,8 +42,8 @@ class StateAPI { h256 get_account_storage(EthBlockNumber blk_num, const addr_t& addr, const u256& key) const; bytes get_code_by_address(EthBlockNumber blk_num, const addr_t& addr) const; ExecutionResult dry_run_transaction(EthBlockNumber blk_num, const EVMBlock& blk, const EVMTransaction& trx) const; - bytes trace(EthBlockNumber blk_num, const EVMBlock& blk, const std::vector trx, - std::optional params = {}) const; + bytes trace(EthBlockNumber blk_num, const EVMBlock& blk, const std::vector& state_trxs, + const std::vector& trxs, std::optional params = {}) const; StateDescriptor get_last_committed_state_descriptor() const; const 
TransactionsExecutionResult& execute_transactions(const EVMBlock& block, diff --git a/libraries/core_libs/consensus/include/final_chain/state_api_data.hpp b/libraries/core_libs/consensus/include/final_chain/state_api_data.hpp index fae4aeb841..1a9daacdcb 100644 --- a/libraries/core_libs/consensus/include/final_chain/state_api_data.hpp +++ b/libraries/core_libs/consensus/include/final_chain/state_api_data.hpp @@ -3,12 +3,10 @@ #include #include #include -#include #include #include "common/encoding_rlp.hpp" #include "common/types.hpp" -#include "config/state_config.hpp" namespace taraxa::state_api { diff --git a/libraries/core_libs/consensus/include/key_manager/key_manager.hpp b/libraries/core_libs/consensus/include/key_manager/key_manager.hpp index f698dcb5f9..7c44f58bca 100644 --- a/libraries/core_libs/consensus/include/key_manager/key_manager.hpp +++ b/libraries/core_libs/consensus/include/key_manager/key_manager.hpp @@ -10,7 +10,7 @@ namespace taraxa { class KeyManager { public: - KeyManager(std::shared_ptr final_chain); + KeyManager(std::shared_ptr final_chain); KeyManager(const KeyManager &) = delete; KeyManager(KeyManager &&) = delete; KeyManager &operator=(const KeyManager &) = delete; @@ -22,7 +22,7 @@ class KeyManager { std::shared_mutex vrf_keys_mutex_; std::unordered_map> vrf_keys_; - std::shared_ptr final_chain_; + std::shared_ptr final_chain_; }; } // namespace taraxa \ No newline at end of file diff --git a/libraries/core_libs/consensus/include/pbft/pbft_chain.hpp b/libraries/core_libs/consensus/include/pbft/pbft_chain.hpp index 2a4bf2ad63..1f9ea8e84a 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_chain.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_chain.hpp @@ -1,11 +1,9 @@ #pragma once -#include #include +#include #include -#include -#include "config/pbft_config.hpp" #include "logger/logger.hpp" #include "pbft/pbft_block.hpp" diff --git a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp 
b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp index 04ef10314b..289307cdbd 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp @@ -1,6 +1,5 @@ #pragma once -#include #include #include "common/types.hpp" @@ -55,11 +54,10 @@ class PbftManager { public: using time_point = std::chrono::system_clock::time_point; - PbftManager(const GenesisConfig &conf, addr_t node_addr, std::shared_ptr db, - std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, - std::shared_ptr dag_mgr, std::shared_ptr trx_mgr, - std::shared_ptr final_chain, - std::shared_ptr pillar_chain_mgr, secret_t node_sk); + PbftManager(const FullNodeConfig &conf, std::shared_ptr db, std::shared_ptr pbft_chain, + std::shared_ptr vote_mgr, std::shared_ptr dag_mgr, + std::shared_ptr trx_mgr, std::shared_ptr final_chain, + std::shared_ptr pillar_chain_mgr); ~PbftManager(); PbftManager(const PbftManager &) = delete; PbftManager(PbftManager &&) = delete; @@ -214,7 +212,7 @@ class PbftManager { * @param dag_blocks DAG blocks * @return DAG blocks ordering hash */ - static blk_hash_t calculateOrderHash(const std::vector &dag_blocks); + static blk_hash_t calculateOrderHash(const std::vector> &dag_blocks); /** * @brief Reorder transactions data if DAG reordering caused transactions with same sender to have nonce in incorrect @@ -226,9 +224,10 @@ class PbftManager { /** * @brief Check a block weight of gas estimation * @param dag_blocks dag blocks + * @param period period * @return true if total weight of gas estimation is less or equal to gas limit. 
Otherwise return false */ - bool checkBlockWeight(const std::vector &dag_blocks) const; + bool checkBlockWeight(const std::vector> &dag_blocks, PbftPeriod period) const; blk_hash_t getLastPbftBlockHash(); @@ -258,7 +257,7 @@ class PbftManager { /** * @brief Test/enforce broadcastVotes() to actually send votes */ - void testBroadcatVotesFunctionality(); + void testBroadcastVotesFunctionality(); /** * @brief Check PBFT blocks syncing queue. If there are synced PBFT blocks in queue, push it to PBFT chain @@ -281,6 +280,14 @@ class PbftManager { */ bool validatePillarDataInPeriodData(const PeriodData &period_data) const; + /** + * @brief Gossips vote to the other peers + * + * @param vote + * @param voted_block + */ + void gossipVote(const std::shared_ptr &vote, const std::shared_ptr &voted_block); + private: /** * @brief Broadcast or rebroadcast 2t+1 soft/reward/previous round next votes + all own votes if needed @@ -306,7 +313,7 @@ class PbftManager { /** * @brief Check if there is 2t+1 cert votes for some valid block, if yes - push it into the chain - * @return true if new cert voted block was pushed into the chain, otheriwse false + * @return true if new cert voted block was pushed into the chain, otherwise false */ bool tryPushCertVotesBlock(); @@ -413,7 +420,6 @@ class PbftManager { * * @param vote * @param voted_block - * @return true if successful, otherwise false */ void gossipNewVote(const std::shared_ptr &vote, const std::shared_ptr &voted_block); @@ -461,11 +467,11 @@ class PbftManager { bool validatePbftBlock(const std::shared_ptr &pbft_block) const; /** - * @brief Validates pbft block state root. + * @brief Validates pbft block final chain hash. 
* @param pbft_block PBFT block * @return validation result */ - PbftStateRootValidation validatePbftBlockStateRoot(const std::shared_ptr &pbft_block) const; + PbftStateRootValidation validateFinalChainHash(const std::shared_ptr &pbft_block) const; /** * @brief Validates pbft block extra data presence: @@ -530,7 +536,7 @@ class PbftManager { const std::vector> &cert_votes) const; /** - @brief Validates PBFT block [illar] votes + @brief Validates PBFT block pillar votes * * @param period_data * @return @@ -559,7 +565,7 @@ class PbftManager { // Multiple proposed pbft blocks could have same dag block anchor at same period so this cache improves retrieval of // dag block order for specific anchor - mutable std::unordered_map> anchor_dag_block_order_cache_; + mutable std::unordered_map>> anchor_dag_block_order_cache_; std::unique_ptr daemon_; std::shared_ptr db_; @@ -568,7 +574,7 @@ class PbftManager { std::shared_ptr dag_mgr_; std::weak_ptr network_; std::shared_ptr trx_mgr_; - std::shared_ptr final_chain_; + std::shared_ptr final_chain_; std::shared_ptr pillar_chain_mgr_; const addr_t node_addr_; diff --git a/libraries/core_libs/consensus/include/pbft/proposed_blocks.hpp b/libraries/core_libs/consensus/include/pbft/proposed_blocks.hpp index 5a3772751a..e76d084d74 100644 --- a/libraries/core_libs/consensus/include/pbft/proposed_blocks.hpp +++ b/libraries/core_libs/consensus/include/pbft/proposed_blocks.hpp @@ -66,6 +66,14 @@ class ProposedBlocks { */ void cleanupProposedPbftBlocksByPeriod(PbftPeriod period); + /** + * @brief Check if there are any old proposed blocks that were supposed to be deleted + * @param current_period + * + * @return err msg in case there are some old blocks, otherwise empty optional + */ + std::optional checkOldBlocksPresence(PbftPeriod current_period) const; + private: // > std::map, bool>>> proposed_blocks_; diff --git a/libraries/core_libs/consensus/include/pillar_chain/pillar_block.hpp 
b/libraries/core_libs/consensus/include/pillar_chain/pillar_block.hpp index f6a3b3a5ff..4aa58d1903 100644 --- a/libraries/core_libs/consensus/include/pillar_chain/pillar_block.hpp +++ b/libraries/core_libs/consensus/include/pillar_chain/pillar_block.hpp @@ -1,7 +1,10 @@ #pragma once +#include #include +#include + #include "common/encoding_rlp.hpp" #include "common/types.hpp" #include "final_chain/state_api_data.hpp" diff --git a/libraries/core_libs/consensus/include/pillar_chain/pillar_chain_manager.hpp b/libraries/core_libs/consensus/include/pillar_chain/pillar_chain_manager.hpp index f1ff32d638..9ddf0c3a83 100644 --- a/libraries/core_libs/consensus/include/pillar_chain/pillar_chain_manager.hpp +++ b/libraries/core_libs/consensus/include/pillar_chain/pillar_chain_manager.hpp @@ -3,7 +3,6 @@ #include #include "common/event.hpp" -#include "config/config.hpp" #include "final_chain/data.hpp" #include "logger/logger.hpp" #include "pillar_chain/pillar_block.hpp" @@ -13,6 +12,7 @@ namespace taraxa { class DbStorage; class Network; class KeyManager; +struct FicusHardforkConfig; } // namespace taraxa namespace taraxa::final_chain { @@ -128,7 +128,8 @@ class PillarChainManager { * @param pillar_block_hash * @param above_threshold * - * @return all pillar votes for specified period and pillar block hash + * @return all pillar votes for specified period and pillar block hash. 
In case above_threshold == true, votes + * are sorted based on vote weight and the minimum number of votes above threshold are returned */ std::vector> getVerifiedPillarVotes(PbftPeriod period, const blk_hash_t pillar_block_hash, bool above_threshold = false) const; @@ -163,12 +164,12 @@ class PillarChainManager { * @param pillar_block * @param new_vote_counts */ - void saveNewPillarBlock(std::shared_ptr pillar_block, + void saveNewPillarBlock(const std::shared_ptr& pillar_block, std::vector&& new_vote_counts); private: // Node config - const FicusHardforkConfig kFicusHfConfig; + const FicusHardforkConfig& kFicusHfConfig; std::shared_ptr db_; std::weak_ptr network_; diff --git a/libraries/core_libs/consensus/include/pillar_chain/pillar_votes.hpp b/libraries/core_libs/consensus/include/pillar_chain/pillar_votes.hpp index 3400509b6e..2f98f3f6c7 100644 --- a/libraries/core_libs/consensus/include/pillar_chain/pillar_votes.hpp +++ b/libraries/core_libs/consensus/include/pillar_chain/pillar_votes.hpp @@ -10,8 +10,8 @@ namespace taraxa::pillar_chain { class PillarVotes { public: struct WeightVotes { - std::unordered_map> votes; - uint64_t weight{0}; // votes weight + std::unordered_map, uint64_t /* vote weight */>> votes; + uint64_t weight{0}; // votes accumulated weight }; struct PeriodVotes { @@ -62,7 +62,7 @@ class PillarVotes { * * @return true if vote was successfully added, otherwise false */ - bool addVerifiedVote(const std::shared_ptr& vote, u_int64_t validator_vote_count); + bool addVerifiedVote(const std::shared_ptr& vote, uint64_t validator_vote_count); /** * @brief Get all pillar block votes for specified pillar block @@ -71,7 +71,8 @@ class PillarVotes { * @param pillar_block_hash * @param above_threshold if true, return only if there is > threshold verified votes * - * @return all pillar block votes for specified period and pillar block hash + * @return all pillar block votes for specified period and pillar block hash. 
In case above_threshold == true, votes + * are sorted based on vote weight and the minimum number of votes above threshold are returned */ std::vector> getVerifiedVotes(PbftPeriod period, const blk_hash_t& pillar_block_hash, bool above_threshold = false) const; diff --git a/libraries/core_libs/consensus/include/rewards/block_stats.hpp b/libraries/core_libs/consensus/include/rewards/block_stats.hpp index cc80cd1b8e..513a8a40fe 100644 --- a/libraries/core_libs/consensus/include/rewards/block_stats.hpp +++ b/libraries/core_libs/consensus/include/rewards/block_stats.hpp @@ -1,7 +1,5 @@ #pragma once -#include - #include "common/encoding_rlp.hpp" #include "common/types.hpp" #include "pbft/period_data.hpp" diff --git a/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp b/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp index d9668ea1c7..1b74dfe71c 100644 --- a/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp +++ b/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp @@ -1,3 +1,5 @@ +#pragma once + #include "config/hardfork.hpp" #include "rewards/block_stats.hpp" #include "storage/storage.hpp" @@ -10,7 +12,7 @@ namespace taraxa::rewards { */ class Stats { public: - Stats(uint32_t committee_size, const HardforksConfig& hardforks, std::shared_ptr db, + Stats(uint32_t committee_size, const HardforksConfig& hardforks, std::shared_ptr db, std::function&& dpos_eligible_total_vote_count, EthBlockNumber last_blk_num = 0); /** @@ -19,7 +21,7 @@ class Stats { * @return vector that should be processed at current block */ std::vector processStats(const PeriodData& current_blk, const std::vector& trxs_gas_used, - DbStorage::Batch& write_batch); + Batch& write_batch); /** * @brief called on start of new rewards interval. 
clears blocks_stats_ collection * and removes all data saved in db column @@ -44,11 +46,11 @@ class Stats { /** * @brief saves stats to database to not lose this data in case of node restart */ - void saveBlockStats(uint64_t number, const BlockStats& stats, DbStorage::Batch& write_batch); + void saveBlockStats(uint64_t number, const BlockStats& stats, Batch& write_batch); const uint32_t kCommitteeSize; const HardforksConfig kHardforksConfig; - std::shared_ptr db_; + std::shared_ptr db_; const std::function dpos_eligible_total_vote_count_; std::unordered_map blocks_stats_; }; diff --git a/libraries/core_libs/consensus/include/slashing_manager/slashing_manager.hpp b/libraries/core_libs/consensus/include/slashing_manager/slashing_manager.hpp index b6d1278045..87787f3879 100644 --- a/libraries/core_libs/consensus/include/slashing_manager/slashing_manager.hpp +++ b/libraries/core_libs/consensus/include/slashing_manager/slashing_manager.hpp @@ -7,8 +7,8 @@ namespace taraxa { class SlashingManager { public: - SlashingManager(std::shared_ptr final_chain, std::shared_ptr trx_manager, - std::shared_ptr gas_pricer, const FullNodeConfig &config, secret_t node_sk); + SlashingManager(const FullNodeConfig &config, std::shared_ptr final_chain, + std::shared_ptr trx_manager, std::shared_ptr gas_pricer); SlashingManager(const SlashingManager &) = delete; SlashingManager(SlashingManager &&) = delete; SlashingManager &operator=(const SlashingManager &) = delete; @@ -17,14 +17,14 @@ class SlashingManager { bool submitDoubleVotingProof(const std::shared_ptr &vote_a, const std::shared_ptr &vote_b); private: - std::shared_ptr final_chain_; + std::shared_ptr final_chain_; std::shared_ptr trx_manager_; std::shared_ptr gas_pricer_; // Already processed double voting proofs ExpirationCache double_voting_proofs_; - const FullNodeConfig kConfig; + const FullNodeConfig &kConfig; const addr_t kAddress; const secret_t kPrivateKey; }; diff --git 
a/libraries/core_libs/consensus/include/transaction/gas_pricer.hpp b/libraries/core_libs/consensus/include/transaction/gas_pricer.hpp index 4335588f00..0034345825 100644 --- a/libraries/core_libs/consensus/include/transaction/gas_pricer.hpp +++ b/libraries/core_libs/consensus/include/transaction/gas_pricer.hpp @@ -1,12 +1,15 @@ #pragma once #include +#include #include "config/genesis.hpp" -#include "final_chain/final_chain.hpp" +#include "transaction/transaction.hpp" namespace taraxa { +class DbStorage; + /** @addtogroup Transaction * @{ */ diff --git a/libraries/core_libs/consensus/include/transaction/transaction_manager.hpp b/libraries/core_libs/consensus/include/transaction/transaction_manager.hpp index 0d5f272f5d..4cc87377f7 100644 --- a/libraries/core_libs/consensus/include/transaction/transaction_manager.hpp +++ b/libraries/core_libs/consensus/include/transaction/transaction_manager.hpp @@ -1,7 +1,6 @@ #pragma once #include "common/event.hpp" -#include "config/config.hpp" #include "final_chain/final_chain.hpp" #include "logger/logger.hpp" #include "storage/storage.hpp" @@ -19,6 +18,7 @@ namespace taraxa { */ enum class TransactionStatus { Inserted = 0, InsertedNonProposable, Known, Overflow }; +struct FullNodeConfig; class DagBlock; class DagManager; class FullNode; @@ -27,9 +27,9 @@ class FullNode; * @brief TransactionManager class verifies and inserts incoming transactions in memory pool and handles saving * transactions and all transactions state change * - * Incoming new transactions can be verified with verifyTransaction functions and than inserted in the transaction pool - * with insertValidatedTransaction. Transactions are kept in transactions memory pool until they are included in a - * proposed dag block or received in an incoming dag block. Transaction verification consist of: + * Incoming new transactions can be verified with verifyTransaction functions and than inserted in the transaction + * pool with insertValidatedTransaction. 
Transactions are kept in transactions memory pool until they are included + * in a proposed dag block or received in an incoming dag block. Transaction verification consist of: * - Verifying the format * - Verifying signature * - Verifying chan id @@ -50,8 +50,8 @@ class FullNode; */ class TransactionManager : public std::enable_shared_from_this { public: - TransactionManager(const FullNodeConfig &conf, std::shared_ptr db, std::shared_ptr final_chain, - addr_t node_addr); + TransactionManager(const FullNodeConfig &conf, std::shared_ptr db, + std::shared_ptr final_chain, addr_t node_addr); /** * @brief Estimates required gas value to execute transaction @@ -116,11 +116,11 @@ class TransactionManager : public std::enable_shared_from_this const transaction_accepted_{}; private: - const FullNodeConfig kConf; + const FullNodeConfig &kConf; // Guards updating transaction status // Transactions can be in one of three states: // 1. In transactions pool; 2. In non-finalized Dag block 3. Executed @@ -245,7 +245,7 @@ class TransactionManager : public std::enable_shared_from_this db_{nullptr}; - std::shared_ptr final_chain_{nullptr}; + std::shared_ptr final_chain_{nullptr}; LOG_OBJECTS_DEFINE }; diff --git a/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp b/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp index 3fe2cacaed..046062b0ab 100644 --- a/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp +++ b/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp @@ -6,6 +6,8 @@ namespace taraxa { +class PbftVote; + enum class TwoTPlusOneVotedBlockType { SoftVotedBlock, CertVotedBlock, NextVotedBlock, NextVotedNullBlock }; struct VerifiedVotes { diff --git a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp index bf666d3a1e..4b77962837 100644 --- a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp +++ 
b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp @@ -16,6 +16,9 @@ namespace taraxa { class Network; class SlashingManager; +class PbftVote; +struct PbftConfig; +struct FullNodeConfig; namespace network::tarcap { class TaraxaPeer; @@ -26,9 +29,8 @@ class TaraxaPeer; */ class VoteManager { public: - VoteManager(const addr_t& node_addr, const PbftConfig& pbft_config, const secret_t& node_sk, - const vrf_wrapper::vrf_sk_t& vrf_sk, std::shared_ptr db, std::shared_ptr pbft_chain, - std::shared_ptr final_chain, std::shared_ptr key_manager, + VoteManager(const FullNodeConfig& config, std::shared_ptr db, std::shared_ptr pbft_chain, + std::shared_ptr final_chain, std::shared_ptr key_manager, std::shared_ptr slashing_manager); ~VoteManager() = default; VoteManager(const VoteManager&) = delete; @@ -106,8 +108,7 @@ class VoteManager { * @param block_hash * @param batch */ - void resetRewardVotes(PbftPeriod period, PbftRound round, PbftStep step, const blk_hash_t& block_hash, - DbStorage::Batch& batch); + void resetRewardVotes(PbftPeriod period, PbftRound round, PbftStep step, const blk_hash_t& block_hash, Batch& batch); /** * @brief Check reward votes for specified pbft block @@ -150,7 +151,7 @@ class VoteManager { * * @param write_batch */ - void clearOwnVerifiedVotes(DbStorage::Batch& write_batch); + void clearOwnVerifiedVotes(Batch& write_batch); /** * @brief Place a vote, save it in the verified votes queue, and gossip to peers @@ -200,7 +201,7 @@ class VoteManager { bool voteAlreadyValidated(const vote_hash_t& vote_hash) const; /** - * @brief Generates vrf sorition and calculates its weight + * @brief Generates vrf sortition and calculates its weight * @return true if sortition weight > 0, otherwise false */ bool genAndValidateVrfSortition(PbftPeriod pbft_period, PbftRound pbft_round) const; @@ -211,7 +212,7 @@ class VoteManager { * @param period * @param round * @param votes_type - * @return emoty optional if no 2t+1 voted block was found, otherwise 
initialized optional with block hash + * @return empty optional if no 2t+1 voted block was found, otherwise initialized optional with block hash */ std::optional getTwoTPlusOneVotedBlock(PbftPeriod period, PbftRound round, TwoTPlusOneVotedBlockType type) const; @@ -228,7 +229,7 @@ class VoteManager { TwoTPlusOneVotedBlockType type) const; /** - * @brief Sets current pbft period & round. It also checks if we dont alredy have 2t+1 vote bundles(pf any type) for + * @brief Sets current pbft period & round. It also checks if we dont already have 2t+1 vote bundles(pf any type) for * the provided period & round and if so, it saves these bundles into db * * @param pbft_period @@ -278,7 +279,7 @@ class VoteManager { std::shared_ptr db_; std::shared_ptr pbft_chain_; - std::shared_ptr final_chain_; + std::shared_ptr final_chain_; std::shared_ptr key_manager_; std::weak_ptr network_; std::shared_ptr slashing_manager_; diff --git a/libraries/core_libs/consensus/src/dag/dag.cpp b/libraries/core_libs/consensus/src/dag/dag.cpp index 227aeda47f..d6c45716fb 100644 --- a/libraries/core_libs/consensus/src/dag/dag.cpp +++ b/libraries/core_libs/consensus/src/dag/dag.cpp @@ -4,7 +4,6 @@ #include #include -#include #include #include #include @@ -12,10 +11,6 @@ #include #include "dag/dag.hpp" -#include "key_manager/key_manager.hpp" -#include "network/network.hpp" -#include "network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp" -#include "transaction/transaction_manager.hpp" namespace taraxa { @@ -53,7 +48,7 @@ bool Dag::addVEEs(blk_hash_t const &new_vertex, blk_hash_t const &pivot, std::ve // Note: add edges, // *** important - // Add a new block, edges are pointing from pivot to new_veretx + // Add a new block, edges are pointing from pivot to new_vertex if (!pivot.isZero()) { if (hasVertex(pivot)) { std::tie(edge, res) = boost::add_edge_by_label(pivot, new_vertex, graph_); @@ -153,7 +148,7 @@ bool Dag::computeOrder(const blk_hash_t &anchor, std::vector &ordere 
dfs.push({cur.first, true}); std::vector> neighbors; // iterate through neighbors - for (std::tie(adj_s, adj_e) = adjacenct_vertices(cur.first, graph_); adj_s != adj_e; adj_s++) { + for (std::tie(adj_s, adj_e) = boost::adjacent_vertices(cur.first, graph_); adj_s != adj_e; adj_s++) { if (epfriend.find(index_map[*adj_s]) == epfriend.end()) { // not in this epoch continue; } @@ -188,7 +183,7 @@ bool Dag::reachable(vertex_t const &from, vertex_t const &to) const { vertex_t t = st.top(); st.pop(); vertex_adj_iter_t s, e; - for (std::tie(s, e) = adjacenct_vertices(t, graph_); s != e; ++s) { + for (std::tie(s, e) = boost::adjacent_vertices(t, graph_); s != e; ++s) { if (visited.count(*s)) continue; if (*s == target) return true; visited.insert(*s); @@ -226,7 +221,7 @@ std::vector PivotTree::getGhostPath(const blk_hash_t &vertex) const cur = st.top(); st.pop(); post_order.emplace_back(cur); - for (std::tie(s, e) = adjacenct_vertices(cur, graph_); s != e; s++) { + for (std::tie(s, e) = boost::adjacent_vertices(cur, graph_); s != e; s++) { st.emplace(*s); } } @@ -237,7 +232,7 @@ std::vector PivotTree::getGhostPath(const blk_hash_t &vertex) const for (auto const &n : post_order) { auto total_w = 0; // get childrens - for (std::tie(s, e) = adjacenct_vertices(n, graph_); s != e; s++) { + for (std::tie(s, e) = boost::adjacent_vertices(n, graph_); s != e; s++) { if (weight_map.count(*s)) { // bigger timestamp total_w += weight_map[*s]; } @@ -253,7 +248,7 @@ std::vector PivotTree::getGhostPath(const blk_hash_t &vertex) const size_t heavist = 0; vertex_t next = root; - for (std::tie(s, e) = adjacenct_vertices(root, graph_); s != e; s++) { + for (std::tie(s, e) = boost::adjacent_vertices(root, graph_); s != e; s++) { if (!weight_map.count(*s)) continue; // bigger timestamp size_t w = weight_map[*s]; assert(w > 0); @@ -262,7 +257,6 @@ std::vector PivotTree::getGhostPath(const blk_hash_t &vertex) const next = *s; } else if (w == heavist) { if (index_map[*s] < index_map[next]) { - 
heavist = w; next = *s; } } diff --git a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp index cad7733b8c..9e42063db9 100644 --- a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp @@ -1,7 +1,5 @@ #include "dag/dag_block_proposer.hpp" -#include - #include "common/util.hpp" #include "dag/dag_manager.hpp" #include "final_chain/final_chain.hpp" @@ -13,27 +11,29 @@ namespace taraxa { using namespace vdf_sortition; -DagBlockProposer::DagBlockProposer(const DagBlockProposerConfig& bp_config, std::shared_ptr dag_mgr, +DagBlockProposer::DagBlockProposer(const FullNodeConfig& config, std::shared_ptr dag_mgr, std::shared_ptr trx_mgr, std::shared_ptr final_chain, std::shared_ptr db, - std::shared_ptr key_manager, addr_t node_addr, secret_t node_sk, - vrf_wrapper::vrf_sk_t vrf_sk, uint64_t pbft_gas_limit, uint64_t dag_gas_limit, - const state_api::Config& state_config) - : bp_config_(bp_config), + std::shared_ptr key_manager) + : bp_config_(config.genesis.dag.block_proposer), total_trx_shards_(std::max(bp_config_.shard, uint16_t(1))), dag_mgr_(std::move(dag_mgr)), trx_mgr_(std::move(trx_mgr)), final_chain_(std::move(final_chain)), key_manager_(std::move(key_manager)), db_(std::move(db)), - node_addr_(node_addr), - node_sk_(std::move(node_sk)), - vrf_sk_(std::move(vrf_sk)), + node_addr_(dev::toAddress(config.node_secret)), + node_sk_(config.node_secret), + vrf_sk_(config.vrf_secret), vrf_pk_(vrf_wrapper::getVrfPublicKey(vrf_sk_)), - kPbftGasLimit(pbft_gas_limit), - kDagGasLimit(dag_gas_limit), - kHardforks(state_config.hardforks), - kValidatorMaxVote(state_config.dpos.validator_maximum_stake / state_config.dpos.vote_eligibility_balance_step) { + kPbftGasLimit( + std::min(config.propose_pbft_gas_limit, config.genesis.getGasLimits(final_chain_->lastBlockNumber()).second)), + kDagGasLimit( + std::min(config.propose_dag_gas_limit, 
config.genesis.getGasLimits(final_chain_->lastBlockNumber()).first)), + kHardforks(config.genesis.state.hardforks), + kValidatorMaxVote(config.genesis.state.dpos.validator_maximum_stake / + config.genesis.state.dpos.vote_eligibility_balance_step) { + const auto& node_addr = node_addr_; LOG_OBJECTS_CREATE("DAG_PROPOSER"); // Add a random component in proposing stale blocks so that not all nodes propose stale blocks at the same time @@ -51,6 +51,11 @@ bool DagBlockProposer::proposeDagBlock() { return false; } + // Do not propose dag blocks if number of non finalized transactions is over the limit + if (trx_mgr_->getNonfinalizedTrxSize() > kMaxNonFinalizedTransactions) { + return false; + } + auto frontier = dag_mgr_->getDagFrontier(); LOG(log_dg_) << "Get frontier with pivot: " << frontier.pivot << " tips: " << frontier.tips; assert(!frontier.pivot.isZero()); @@ -73,9 +78,9 @@ bool DagBlockProposer::proposeDagBlock() { } uint64_t max_vote_count = 0; - const auto vote_count = final_chain_->dpos_eligible_vote_count(*proposal_period, node_addr_); + const auto vote_count = final_chain_->dposEligibleVoteCount(*proposal_period, node_addr_); if (*proposal_period < kHardforks.magnolia_hf.block_num) { - max_vote_count = final_chain_->dpos_eligible_total_vote_count(*proposal_period); + max_vote_count = final_chain_->dposEligibleTotalVoteCount(*proposal_period); } else { max_vote_count = kValidatorMaxVote; } @@ -86,6 +91,17 @@ bool DagBlockProposer::proposeDagBlock() { vdf_sortition::VdfSortition vdf(sortition_params, vrf_sk_, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), vote_count, max_vote_count); + + auto anchor = dag_mgr_->getAnchors().second; + if (frontier.pivot != anchor) { + if (dag_mgr_->getNonFinalizedBlocksSize().second > kMaxNonFinalizedDagBlocks) { + return false; + } + if (dag_mgr_->getNonFinalizedBlocksMinDifficulty() < vdf.getDifficulty()) { + return false; + } + } + if (vdf.isStale(sortition_params)) { if (last_propose_level_ == 
propose_level) { if (num_tries_ < max_num_tries_) { @@ -103,7 +119,7 @@ bool DagBlockProposer::proposeDagBlock() { } } - auto [transactions, estimations] = getShardedTrxs(*proposal_period, dag_mgr_->getDagConfig().gas_limit); + auto [transactions, estimations] = getShardedTrxs(*proposal_period, kDagGasLimit); if (transactions.empty()) { last_propose_level_ = propose_level; num_tries_ = 0; @@ -154,12 +170,12 @@ bool DagBlockProposer::proposeDagBlock() { auto dag_block = createDagBlock(std::move(frontier), propose_level, transactions, std::move(estimations), std::move(vdf)); - if (dag_mgr_->addDagBlock(std::move(dag_block), std::move(transactions), true).first) { - LOG(log_nf_) << "Proposed new DAG block " << dag_block.getHash() << ", pivot " << dag_block.getPivot() - << " , txs num " << dag_block.getTrxs().size(); + if (dag_mgr_->addDagBlock(dag_block, std::move(transactions), true).first) { + LOG(log_nf_) << "Proposed new DAG block " << dag_block->getHash() << ", pivot " << dag_block->getPivot() + << " , txs num " << dag_block->getTrxs().size(); proposed_blocks_count_ += 1; } else { - LOG(log_er_) << "Failed to add newly proposed dag block " << dag_block.getHash() << " into dag"; + LOG(log_er_) << "Failed to add newly proposed dag block " << dag_block->getHash() << " into dag"; } last_propose_level_ = propose_level; @@ -183,12 +199,14 @@ void DagBlockProposer::start() { while (!stopped_) { // Blocks are not proposed if we are behind the network and still syncing auto syncing = false; + auto packets_over_the_limit = false; if (auto net = network_.lock()) { syncing = net->pbft_syncing(); + packets_over_the_limit = net->packetQueueOverLimit(); } - // Only sleep if block was not proposed or if we are syncing, if block is proposed try to propose another block - // immediately - if (syncing || !proposeDagBlock()) { + // Only sleep if block was not proposed or if we are syncing or if packets queue is over the limit, if block is + // proposed try to propose another block 
immediately + if (syncing || packets_over_the_limit || !proposeDagBlock()) { thisThreadSleepForMilliSeconds(min_proposal_delay); } } @@ -315,8 +333,10 @@ vec_blk_t DagBlockProposer::selectDagBlockTips(const vec_blk_t& frontier_tips, u return tips; } -DagBlock DagBlockProposer::createDagBlock(DagFrontier&& frontier, level_t level, const SharedTransactions& trxs, - std::vector&& estimations, VdfSortition&& vdf) const { +std::shared_ptr DagBlockProposer::createDagBlock(DagFrontier&& frontier, level_t level, + const SharedTransactions& trxs, + std::vector&& estimations, + VdfSortition&& vdf) const { // When we propose block we know it is valid, no need for block verification with queue, // simply add the block to the DAG vec_trx_t trx_hashes; @@ -331,21 +351,19 @@ DagBlock DagBlockProposer::createDagBlock(DagFrontier&& frontier, level_t level, frontier.tips = selectDagBlockTips(frontier.tips, kPbftGasLimit - block_estimation); } - DagBlock block(frontier.pivot, std::move(level), std::move(frontier.tips), std::move(trx_hashes), block_estimation, - std::move(vdf), node_sk_); - - return block; + return std::make_shared(frontier.pivot, std::move(level), std::move(frontier.tips), std::move(trx_hashes), + block_estimation, std::move(vdf), node_sk_); } bool DagBlockProposer::isValidDposProposer(PbftPeriod propose_period) const { - if (final_chain_->last_block_number() < propose_period) { - LOG(log_wr_) << "Last finalized block period " << final_chain_->last_block_number() << " < propose_period " + if (final_chain_->lastBlockNumber() < propose_period) { + LOG(log_wr_) << "Last finalized block period " << final_chain_->lastBlockNumber() << " < propose_period " << propose_period; return false; } try { - return final_chain_->dpos_is_eligible(propose_period, node_addr_); + return final_chain_->dposIsEligible(propose_period, node_addr_); } catch (state_api::ErrFutureBlock& c) { LOG(log_wr_) << "Proposal period " << propose_period << " is too far ahead of DPOS. 
" << c.what(); return false; diff --git a/libraries/core_libs/consensus/src/dag/dag_manager.cpp b/libraries/core_libs/consensus/src/dag/dag_manager.cpp index fd5853dc48..36b4f22628 100644 --- a/libraries/core_libs/consensus/src/dag/dag_manager.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_manager.cpp @@ -3,45 +3,41 @@ #include #include -#include #include #include #include #include +#include "config/config.hpp" #include "dag/dag.hpp" #include "key_manager/key_manager.hpp" #include "network/network.hpp" #include "transaction/transaction_manager.hpp" namespace taraxa { -DagManager::DagManager(const DagBlock &dag_genesis_block, addr_t node_addr, const SortitionConfig &sortition_config, - const DagConfig &dag_config, std::shared_ptr trx_mgr, - std::shared_ptr pbft_chain, std::shared_ptr final_chain, - std::shared_ptr db, std::shared_ptr key_manager, uint64_t pbft_gas_limit, - const state_api::Config &state_config, bool is_light_node, uint64_t light_node_history, - uint32_t max_levels_per_period, uint32_t dag_expiry_limit) try + +DagManager::DagManager(const FullNodeConfig &config, addr_t node_addr, std::shared_ptr trx_mgr, + std::shared_ptr pbft_chain, std::shared_ptr final_chain, + std::shared_ptr db, std::shared_ptr key_manager) try : max_level_(db->getLastBlocksLevel()), - pivot_tree_(std::make_shared(dag_genesis_block.getHash(), node_addr)), - total_dag_(std::make_shared(dag_genesis_block.getHash(), node_addr)), + pivot_tree_(std::make_shared(config.genesis.dag_genesis_block.getHash(), node_addr)), + total_dag_(std::make_shared(config.genesis.dag_genesis_block.getHash(), node_addr)), trx_mgr_(std::move(trx_mgr)), pbft_chain_(std::move(pbft_chain)), db_(std::move(db)), key_manager_(std::move(key_manager)), - anchor_(dag_genesis_block.getHash()), + anchor_(config.genesis.dag_genesis_block.getHash()), period_(0), - sortition_params_manager_(node_addr, sortition_config, db_), - dag_config_(dag_config), - genesis_block_(std::make_shared(dag_genesis_block)), - 
is_light_node_(is_light_node), - light_node_history_(light_node_history), - max_levels_per_period_(max_levels_per_period), - dag_expiry_limit_(dag_expiry_limit), + sortition_params_manager_(node_addr, config.genesis.sortition, db_), + dag_config_(config.genesis.dag), + genesis_block_(std::make_shared(config.genesis.dag_genesis_block)), + max_levels_per_period_(config.max_levels_per_period), + dag_expiry_limit_(config.dag_expiry_limit), seen_blocks_(cache_max_size_, cache_delete_step_), final_chain_(std::move(final_chain)), - kPbftGasLimit(pbft_gas_limit), - kHardforks(state_config.hardforks), - kValidatorMaxVote(state_config.dpos.validator_maximum_stake / state_config.dpos.vote_eligibility_balance_step) { + kGenesis(config.genesis), + kValidatorMaxVote(config.genesis.state.dpos.validator_maximum_stake / + config.genesis.state.dpos.vote_eligibility_balance_step) { LOG_OBJECTS_CREATE("DAGMGR"); if (auto ret = getLatestPivotAndTips(); ret) { frontier_.pivot = ret->first; @@ -50,13 +46,13 @@ DagManager::DagManager(const DagBlock &dag_genesis_block, addr_t node_addr, cons } } // Set DAG level proposal period map - if (!db_->getProposalPeriodForDagLevel(max_levels_per_period)) { + if (!db_->getProposalPeriodForDagLevel(max_levels_per_period_)) { // Node start from scratch - db_->saveProposalPeriodDagLevelsMap(max_levels_per_period, 0); + db_->saveProposalPeriodDagLevelsMap(max_levels_per_period_, 0); } recoverDag(); - if (is_light_node_) { - clearLightNodeHistory(); + if (config.is_light_node) { + clearLightNodeHistory(config.light_node_history); } } catch (std::exception &e) { std::cerr << e.what() << std::endl; @@ -91,9 +87,9 @@ void DagManager::drawPivotGraph(std::string const &str) const { pivot_tree_->drawGraph(str); } -std::pair> DagManager::pivotAndTipsAvailable(DagBlock const &blk) { - auto dag_blk_hash = blk.getHash(); - const auto pivot_hash = blk.getPivot(); +std::pair> DagManager::pivotAndTipsAvailable(const std::shared_ptr &blk) { + auto dag_blk_hash = 
blk->getHash(); + const auto pivot_hash = blk->getPivot(); const auto dag_blk_pivot = getDagBlock(pivot_hash); std::vector missing_tips_or_pivot; @@ -105,7 +101,7 @@ std::pair> DagManager::pivotAndTipsAvailable(DagBl missing_tips_or_pivot.push_back(pivot_hash); } - for (auto const &tip : blk.getTips()) { + for (auto const &tip : blk->getTips()) { auto tip_block = getDagBlock(tip); if (tip_block) { expected_level = std::max(expected_level, tip_block->getLevel() + 1); @@ -119,8 +115,8 @@ std::pair> DagManager::pivotAndTipsAvailable(DagBl return {false, missing_tips_or_pivot}; } - if (expected_level != blk.getLevel()) { - LOG(log_er_) << "DAG Block " << dag_blk_hash << " level " << blk.getLevel() + if (expected_level != blk->getLevel()) { + LOG(log_er_) << "DAG Block " << dag_blk_hash << " level " << blk->getLevel() << ", expected level: " << expected_level; return {false, missing_tips_or_pivot}; } @@ -133,9 +129,9 @@ DagFrontier DagManager::getDagFrontier() { return frontier_; } -std::pair> DagManager::addDagBlock(DagBlock &&blk, SharedTransactions &&trxs, - bool proposed, bool save) { - auto blk_hash = blk.getHash(); +std::pair> DagManager::addDagBlock(const std::shared_ptr &blk, + SharedTransactions &&trxs, bool proposed, bool save) { + auto blk_hash = blk->getHash(); { // One mutex protects the DagManager internal state, the other mutex ensures that dag blocks are gossiped in @@ -145,14 +141,14 @@ std::pair> DagManager::addDagBlock(DagBlock &&blk, { std::scoped_lock lock(mutex_); if (save) { - if (db_->dagBlockInDb(blk.getHash())) { + if (db_->dagBlockInDb(blk->getHash())) { // It is a valid scenario that two threads can receive same block from two peers and process at same time return {true, {}}; } - if (blk.getLevel() < dag_expiry_level_) { + if (blk->getLevel() < dag_expiry_level_) { LOG(log_nf_) << "Dropping old block: " << blk_hash << ". Expiry level: " << dag_expiry_level_ - << ". Block level: " << blk.getLevel(); + << ". 
Block level: " << blk->getLevel(); return {false, {}}; } @@ -165,20 +161,23 @@ std::pair> DagManager::addDagBlock(DagBlock &&blk, // Save the dag block db_->saveDagBlock(blk); } - seen_blocks_.insert(blk.getHash(), blk); - auto pivot_hash = blk.getPivot(); + seen_blocks_.insert(blk->getHash(), blk); + auto pivot_hash = blk->getPivot(); - std::vector tips = blk.getTips(); + std::vector tips = blk->getTips(); level_t current_max_level = max_level_; - max_level_ = std::max(current_max_level, blk.getLevel()); + max_level_ = std::max(current_max_level, blk->getLevel()); - addToDag(blk_hash, pivot_hash, tips, blk.getLevel()); + addToDag(blk_hash, pivot_hash, tips, blk->getLevel()); + if (non_finalized_blks_min_difficulty_ > blk->getDifficulty()) { + non_finalized_blks_min_difficulty_ = blk->getDifficulty(); + } updateFrontier(); } if (save) { block_verified_.emit(blk); - if (auto net = network_.lock()) { + if (std::shared_ptr net = network_.lock()) { net->gossipDagBlock(blk, proposed, trxs); } } @@ -284,9 +283,9 @@ std::vector DagManager::getDagBlockOrder(blk_hash_t const &anchor, P return blk_orders; } -void DagManager::clearLightNodeHistory() { +void DagManager::clearLightNodeHistory(uint64_t light_node_history) { bool dag_expiry_level_condition = dag_expiry_level_ > max_levels_per_period_ + 1; - bool period_over_history_condition = period_ > light_node_history_; + bool period_over_history_condition = period_ > light_node_history; if (period_over_history_condition && dag_expiry_level_condition) { const auto proposal_period = db_->getProposalPeriodForDagLevel(dag_expiry_level_ - max_levels_per_period_ - 1); assert(proposal_period); @@ -294,8 +293,8 @@ void DagManager::clearLightNodeHistory() { const uint64_t start = 0; // This prevents deleting any data needed for dag blocks proposal period, we only delete periods for the expired dag // blocks - const uint64_t end = std::min(period_ - light_node_history_, *proposal_period); - LOG(log_tr_) << "period_ - 
light_node_history_ " << period_ - light_node_history_; + const uint64_t end = std::min(period_ - light_node_history, *proposal_period); + LOG(log_tr_) << "period_ - light_node_history_ " << period_ - light_node_history; LOG(log_tr_) << "dag_expiry_level - max_levels_per_period_ - 1: " << dag_expiry_level_ - max_levels_per_period_ - 1 << " *proposal_period " << *proposal_period; LOG(log_tr_) << "Delete period history from: " << start << " to " << end; @@ -335,11 +334,11 @@ uint DagManager::setDagBlockOrder(blk_hash_t const &new_anchor, PbftPeriod perio } // Only update counter for blocks that are in the dag_order and not in memory DAG, this is only possible when pbft // syncing and processing period data - std::vector dag_blocks_to_update_counters; + std::vector> dag_blocks_to_update_counters; for (auto const &blk : dag_order) { if (non_finalized_blocks_set.count(blk) == 0) { auto dag_block = getDagBlock(blk); - dag_blocks_to_update_counters.push_back(*dag_block); + dag_blocks_to_update_counters.push_back(dag_block); } } @@ -364,6 +363,7 @@ uint DagManager::setDagBlockOrder(blk_hash_t const &new_anchor, PbftPeriod perio std::unordered_map> expired_dag_blocks_to_remove; std::vector expired_dag_blocks_transactions; + non_finalized_blks_min_difficulty_ = UINT32_MAX; for (auto &v : non_finalized_blocks) { for (auto &blk_hash : v.second) { if (dag_order_set.count(blk_hash) != 0) { @@ -375,10 +375,14 @@ uint DagManager::setDagBlockOrder(blk_hash_t const &new_anchor, PbftPeriod perio if (validateBlockNotExpired(dag_block, expired_dag_blocks_to_remove)) { addToDag(blk_hash, pivot_hash, dag_block->getTips(), dag_block->getLevel(), false); + if (non_finalized_blks_min_difficulty_ > dag_block->getDifficulty()) { + non_finalized_blks_min_difficulty_ = dag_block->getDifficulty(); + } } else { db_->removeDagBlock(blk_hash); seen_blocks_.erase(blk_hash); - for (const auto &trx : dag_block->getTrxs()) expired_dag_blocks_transactions.emplace_back(trx); + const auto dag_trxs = 
dag_block->getTrxs(); + std::copy(dag_trxs.begin(), dag_trxs.end(), std::back_inserter(expired_dag_blocks_transactions)); } } } @@ -485,38 +489,38 @@ void DagManager::recoverDag() { for (auto &blk : lvl.second) { // These are some sanity checks that difficulty is correct and block is truly non-finalized. // This is only done on startup - auto period = db_->getDagBlockPeriod(blk.getHash()); + auto period = db_->getDagBlockPeriod(blk->getHash()); if (period != nullptr) { LOG(log_er_) << "Nonfinalized Dag Block actually finalized in period " << period->first; break; } else { - auto propose_period = db_->getProposalPeriodForDagLevel(blk.getLevel()); + auto propose_period = db_->getProposalPeriodForDagLevel(blk->getLevel()); if (!propose_period.has_value()) { - LOG(log_er_) << "No propose period for dag level " << blk.getLevel() << " found"; + LOG(log_er_) << "No propose period for dag level " << blk->getLevel() << " found"; assert(false); break; } - const auto pk = key_manager_->getVrfKey(*propose_period, blk.getSender()); + const auto pk = key_manager_->getVrfKey(*propose_period, blk->getSender()); if (!pk) { - LOG(log_er_) << "DAG block " << blk.getHash() << " with " << blk.getLevel() - << " level is missing VRF key for sender " << blk.getSender(); + LOG(log_er_) << "DAG block " << blk->getHash() << " with " << blk->getLevel() + << " level is missing VRF key for sender " << blk->getSender(); break; } // Verify VDF solution try { uint64_t max_vote_count = 0; - const auto vote_count = final_chain_->dpos_eligible_vote_count(*propose_period, blk.getSender()); - if (*propose_period < kHardforks.magnolia_hf.block_num) { - max_vote_count = final_chain_->dpos_eligible_total_vote_count(*propose_period); + const auto vote_count = final_chain_->dposEligibleVoteCount(*propose_period, blk->getSender()); + if (*propose_period < kGenesis.state.hardforks.magnolia_hf.block_num) { + max_vote_count = final_chain_->dposEligibleTotalVoteCount(*propose_period); } else { max_vote_count = 
kValidatorMaxVote; } - blk.verifyVdf(sortition_params_manager_.getSortitionParams(*propose_period), - db_->getPeriodBlockHash(*propose_period), *pk, vote_count, max_vote_count); + blk->verifyVdf(sortition_params_manager_.getSortitionParams(*propose_period), + db_->getPeriodBlockHash(*propose_period), *pk, vote_count, max_vote_count); } catch (vdf_sortition::VdfSortition::InvalidVdfSortition const &e) { - LOG(log_er_) << "DAG block " << blk.getHash() << " with " << blk.getLevel() - << " level failed on VDF verification with pivot hash " << blk.getPivot() << " reason " + LOG(log_er_) << "DAG block " << blk->getHash() << " with " << blk->getLevel() + << " level failed on VDF verification with pivot hash " << blk->getPivot() << " reason " << e.what(); break; } @@ -525,14 +529,14 @@ void DagManager::recoverDag() { // In case an invalid block somehow ended in DAG db, remove it auto res = pivotAndTipsAvailable(blk); if (res.first) { - if (!addDagBlock(std::move(blk), {}, false, false).first) { - LOG(log_er_) << "DAG block " << blk.getHash() << " could not be added to DAG on startup, removing from db"; - db_->removeDagBlock(blk.getHash()); + if (!addDagBlock(blk, {}, false, false).first) { + LOG(log_er_) << "DAG block " << blk->getHash() << " could not be added to DAG on startup, removing from db"; + db_->removeDagBlock(blk->getHash()); } } else { - LOG(log_er_) << "DAG block " << blk.getHash() + LOG(log_er_) << "DAG block " << blk->getHash() << " could not be added to DAG on startup since it has missing tip/pivot"; - db_->removeDagBlock(blk.getHash()); + db_->removeDagBlock(blk->getHash()); } } } @@ -574,6 +578,11 @@ DagManager::getNonFinalizedBlocksWithTransactions(const std::unordered_set DagManager::getNonFinalizedBlocksSize() const { std::shared_lock lock(mutex_); @@ -586,34 +595,34 @@ std::pair DagManager::getNonFinalizedBlocksSize() const { } std::pair DagManager::verifyBlock( - const DagBlock &blk, const std::unordered_map> &trxs) { - const auto &block_hash = 
blk.getHash(); - vec_trx_t const &all_block_trx_hashes = blk.getTrxs(); + const std::shared_ptr &blk, const std::unordered_map> &trxs) { + const auto &block_hash = blk->getHash(); + vec_trx_t const &all_block_trx_hashes = blk->getTrxs(); vec_trx_t trx_hashes_to_query; SharedTransactions all_block_trxs; // Verify tips/pivot count and uniqueness std::unordered_set unique_tips_pivot; - unique_tips_pivot.insert(blk.getPivot()); - if (blk.getTips().size() > kDagBlockMaxTips) { - LOG(log_er_) << "DAG Block " << block_hash << " tips count " << blk.getTips().size() << " over the limit"; + unique_tips_pivot.insert(blk->getPivot()); + if (blk->getTips().size() > kDagBlockMaxTips) { + LOG(log_er_) << "DAG Block " << block_hash << " tips count " << blk->getTips().size() << " over the limit"; return {VerifyBlockReturnType::FailedTipsVerification, {}}; } - for (auto const &tip : blk.getTips()) { + for (auto const &tip : blk->getTips()) { if (!unique_tips_pivot.insert(tip).second) { LOG(log_er_) << "DAG Block " << block_hash << " tip " << tip << " duplicate"; return {VerifyBlockReturnType::FailedTipsVerification, {}}; } } - auto propose_period = db_->getProposalPeriodForDagLevel(blk.getLevel()); + auto propose_period = db_->getProposalPeriodForDagLevel(blk->getLevel()); // Verify DPOS if (!propose_period.has_value()) { // Cannot find the proposal period in DB yet. The slow node gets an ahead block, remove from seen_blocks - LOG(log_nf_) << "Cannot find proposal period in DB for DAG block " << blk.getHash(); + LOG(log_nf_) << "Cannot find proposal period in DB for DAG block " << blk->getHash(); seen_blocks_.erase(block_hash); return {VerifyBlockReturnType::AheadBlock, {}}; } @@ -645,42 +654,42 @@ std::pair DagManager::ver all_block_trxs.emplace_back(std::move(t)); } - if (blk.getLevel() < dag_expiry_level_) { - LOG(log_nf_) << "Dropping old block: " << blk.getHash() << ". Expiry level: " << dag_expiry_level_ - << ". 
Block level: " << blk.getLevel(); + if (blk->getLevel() < dag_expiry_level_) { + LOG(log_nf_) << "Dropping old block: " << blk->getHash() << ". Expiry level: " << dag_expiry_level_ + << ". Block level: " << blk->getLevel(); return {VerifyBlockReturnType::ExpiredBlock, {}}; } // Verify VDF solution - const auto pk = key_manager_->getVrfKey(*propose_period, blk.getSender()); + const auto pk = key_manager_->getVrfKey(*propose_period, blk->getSender()); if (!pk) { - LOG(log_er_) << "DAG block " << blk.getHash() << " with " << blk.getLevel() - << " level is missing VRF key for sender " << blk.getSender(); + LOG(log_er_) << "DAG block " << blk->getHash() << " with " << blk->getLevel() + << " level is missing VRF key for sender " << blk->getSender(); return {VerifyBlockReturnType::FailedVdfVerification, {}}; } try { const auto proposal_period_hash = db_->getPeriodBlockHash(*propose_period); uint64_t max_vote_count = 0; - const auto vote_count = final_chain_->dpos_eligible_vote_count(*propose_period, blk.getSender()); - if (*propose_period < kHardforks.magnolia_hf.block_num) { - max_vote_count = final_chain_->dpos_eligible_total_vote_count(*propose_period); + const auto vote_count = final_chain_->dposEligibleVoteCount(*propose_period, blk->getSender()); + if (*propose_period < kGenesis.state.hardforks.magnolia_hf.block_num) { + max_vote_count = final_chain_->dposEligibleTotalVoteCount(*propose_period); } else { max_vote_count = kValidatorMaxVote; } - blk.verifyVdf(sortition_params_manager_.getSortitionParams(*propose_period), proposal_period_hash, *pk, vote_count, - max_vote_count); + blk->verifyVdf(sortition_params_manager_.getSortitionParams(*propose_period), proposal_period_hash, *pk, vote_count, + max_vote_count); } catch (vdf_sortition::VdfSortition::InvalidVdfSortition const &e) { - LOG(log_er_) << "DAG block " << block_hash << " with " << blk.getLevel() - << " level failed on VDF verification with pivot hash " << blk.getPivot() << " reason " << e.what(); + 
LOG(log_er_) << "DAG block " << block_hash << " with " << blk->getLevel() + << " level failed on VDF verification with pivot hash " << blk->getPivot() << " reason " << e.what(); LOG(log_er_) << "period from map: " << *propose_period << " current: " << pbft_chain_->getPbftChainSize(); return {VerifyBlockReturnType::FailedVdfVerification, {}}; } - auto dag_block_sender = blk.getSender(); + auto dag_block_sender = blk->getSender(); bool dpos_qualified; try { - dpos_qualified = final_chain_->dpos_is_eligible(*propose_period, dag_block_sender); + dpos_qualified = final_chain_->dposIsEligible(*propose_period, dag_block_sender); } catch (state_api::ErrFutureBlock &c) { LOG(log_er_) << "Verify proposal period " << *propose_period << " is too far ahead of DPOS. " << c.what(); return {VerifyBlockReturnType::FutureBlock, {}}; @@ -688,32 +697,34 @@ std::pair DagManager::ver if (!dpos_qualified) { LOG(log_er_) << "Invalid DAG block DPOS. DAG block " << blk << " is not eligible for DPOS at period " << *propose_period << " for sender " << dag_block_sender.toString() << " current period " - << final_chain_->last_block_number(); + << final_chain_->lastBlockNumber(); return {VerifyBlockReturnType::NotEligible, {}}; } { u256 total_block_weight = 0; - auto block_gas_estimation = blk.getGasEstimation(); + auto block_gas_estimation = blk->getGasEstimation(); for (const auto &trx : all_block_trxs) { total_block_weight += trx_mgr_->estimateTransactionGas(trx, propose_period); } if (total_block_weight != block_gas_estimation) { - LOG(log_er_) << "Invalid block_gas_estimation. DAG block " << blk.getHash() + LOG(log_er_) << "Invalid block_gas_estimation. 
DAG block " << blk->getHash() << " block_gas_estimation: " << block_gas_estimation << " total_block_weight " << total_block_weight - << " current period " << final_chain_->last_block_number(); + << " current period " << final_chain_->lastBlockNumber(); return {VerifyBlockReturnType::IncorrectTransactionsEstimation, {}}; } - if (total_block_weight > getDagConfig().gas_limit) { - LOG(log_er_) << "BlockTooBig. DAG block " << blk.getHash() << " gas_limit: " << getDagConfig().gas_limit + const auto [dag_gas_limit, pbft_gas_limit] = kGenesis.getGasLimits(*propose_period); + + if (total_block_weight > dag_gas_limit) { + LOG(log_er_) << "BlockTooBig. DAG block " << blk->getHash() << " gas_limit: " << dag_gas_limit << " total_block_weight " << total_block_weight << " current period " - << final_chain_->last_block_number(); + << final_chain_->lastBlockNumber(); return {VerifyBlockReturnType::BlockTooBig, {}}; } - if ((blk.getTips().size() + 1) > kPbftGasLimit / getDagConfig().gas_limit) { - for (const auto &t : blk.getTips()) { + if ((blk->getTips().size() + 1) > pbft_gas_limit / dag_gas_limit) { + for (const auto &t : blk->getTips()) { const auto tip_blk = getDagBlock(t); if (tip_blk == nullptr) { LOG(log_er_) << "DAG Block " << block_hash << " tip " << t << " not present"; @@ -721,16 +732,16 @@ std::pair DagManager::ver } block_gas_estimation += tip_blk->getGasEstimation(); } - if (block_gas_estimation > kPbftGasLimit) { - LOG(log_er_) << "BlockTooBig. DAG block " << blk.getHash() << " with tips has limit: " << kPbftGasLimit + if (block_gas_estimation > pbft_gas_limit) { + LOG(log_er_) << "BlockTooBig. 
DAG block " << blk->getHash() << " with tips has limit: " << pbft_gas_limit << " block_gas_estimation " << block_gas_estimation << " current period " - << final_chain_->last_block_number(); + << final_chain_->lastBlockNumber(); return {VerifyBlockReturnType::BlockTooBig, {}}; } } } - LOG(log_dg_) << "Verified DAG block " << blk.getHash(); + LOG(log_dg_) << "Verified DAG block " << blk->getHash(); return {VerifyBlockReturnType::Verified, std::move(all_block_trxs)}; } @@ -746,7 +757,7 @@ bool DagManager::isDagBlockKnown(const blk_hash_t &hash) const { std::shared_ptr DagManager::getDagBlock(const blk_hash_t &hash) const { auto blk = seen_blocks_.get(hash); if (blk.second) { - return std::make_shared(blk.first); + return blk.first; } if (hash == genesis_block_->getHash()) { return genesis_block_; diff --git a/libraries/core_libs/consensus/src/dag/sortition_params_manager.cpp b/libraries/core_libs/consensus/src/dag/sortition_params_manager.cpp index 6d4015bc2d..ab8894fda7 100644 --- a/libraries/core_libs/consensus/src/dag/sortition_params_manager.cpp +++ b/libraries/core_libs/consensus/src/dag/sortition_params_manager.cpp @@ -1,7 +1,5 @@ #include "dag/sortition_params_manager.hpp" -#include "pbft/pbft_block.hpp" - namespace taraxa { SortitionParamsChange::SortitionParamsChange(PbftPeriod period, uint16_t efficiency, const VrfParams& vrf) @@ -48,14 +46,16 @@ SortitionParamsManager::SortitionParamsManager(const addr_t& node_addr, Sortitio auto period = params_changes_.back().period + 1; ignored_efficiency_counter_ = 0; while (true) { - auto data = db_->getPeriodDataRaw(period); - if (data.size() == 0) break; + auto period_data = db_->getPeriodData(period); + if (!period_data.has_value()) { + break; + } + period++; - PeriodData period_data(data); - if (period_data.pbft_blk->getPivotDagBlockHash() != kNullBlockHash) { + if (period_data->pbft_blk->getPivotDagBlockHash() != kNullBlockHash) { if (static_cast(ignored_efficiency_counter_) >= config_.changing_interval - 
config_.computation_interval) { - dag_efficiencies_.push_back(calculateDagEfficiency(period_data)); + dag_efficiencies_.push_back(calculateDagEfficiency(*period_data)); } else { ignored_efficiency_counter_++; } @@ -79,7 +79,7 @@ SortitionParams SortitionParamsManager::getSortitionParams(std::optionalgetTrxs(); total_transactions_count += trxs.size(); } @@ -100,7 +100,7 @@ void SortitionParamsManager::cleanup() { } } -void SortitionParamsManager::pbftBlockPushed(const PeriodData& block, DbStorage::Batch& batch, +void SortitionParamsManager::pbftBlockPushed(const PeriodData& block, Batch& batch, PbftPeriod non_empty_pbft_chain_size) { if (config_.changing_interval == 0) { return; @@ -162,18 +162,18 @@ int32_t getClosestThreshold(const EfficienciesMap& efficiencies, uint16_t target EfficienciesMap SortitionParamsManager::getEfficienciesToUpperRange(uint16_t efficiency, int32_t last_threshold_upper) const { - // efficiencies_to_uppper_range provide mapping from efficiency to VRF upper threshold, params_changes contain + // efficiencies_to_upper_range provide mapping from efficiency to VRF upper threshold, params_changes contain // efficiency for previous setting so mapping is done efficiency of i relates to VRF upper threshold of (i + 1) - EfficienciesMap efficiencies_to_uppper_range; + EfficienciesMap efficiencies_to_upper_range; for (uint32_t i = 1; i < params_changes_.size(); i++) { - efficiencies_to_uppper_range[params_changes_[i].interval_efficiency] = + efficiencies_to_upper_range[params_changes_[i].interval_efficiency] = params_changes_[i - 1].vrf_params.threshold_upper; } if (params_changes_.size() > 1) { - efficiencies_to_uppper_range[efficiency] = last_threshold_upper; + efficiencies_to_upper_range[efficiency] = last_threshold_upper; } - return efficiencies_to_uppper_range; + return efficiencies_to_upper_range; } int32_t SortitionParamsManager::getNewUpperRange(uint16_t efficiency) const { @@ -192,17 +192,17 @@ int32_t 
SortitionParamsManager::getNewUpperRange(uint16_t efficiency) const { threshold_change *= -1; } - auto efficiencies_to_uppper_range = getEfficienciesToUpperRange(efficiency, last_threshold_upper); + auto efficiencies_to_upper_range = getEfficienciesToUpperRange(efficiency, last_threshold_upper); // Check if all params are below, over target efficiency or empty. If so target is still not reached and change it by // calculated amount - if (efficiencies_to_uppper_range.empty() || (efficiencies_to_uppper_range.rbegin()->first < target_efficiency) || - (efficiencies_to_uppper_range.begin()->first >= target_efficiency)) { + if (efficiencies_to_upper_range.empty() || (efficiencies_to_upper_range.rbegin()->first < target_efficiency) || + (efficiencies_to_upper_range.begin()->first >= target_efficiency)) { return last_threshold_upper + threshold_change; } const auto closest_threshold = - getClosestThreshold(efficiencies_to_uppper_range, target_efficiency, is_over_target_efficiency); + getClosestThreshold(efficiencies_to_upper_range, target_efficiency, is_over_target_efficiency); const bool is_over_last_threshold = closest_threshold >= last_threshold_upper; diff --git a/libraries/core_libs/consensus/src/final_chain/data.cpp b/libraries/core_libs/consensus/src/final_chain/data.cpp index 5747ea9ad9..8fc6cd1876 100644 --- a/libraries/core_libs/consensus/src/final_chain/data.cpp +++ b/libraries/core_libs/consensus/src/final_chain/data.cpp @@ -1,38 +1,68 @@ #include "final_chain/data.hpp" +#include #include #include "common/constants.hpp" +#include "pbft/pbft_block.hpp" namespace taraxa::final_chain { -const h256& BlockHeader::uncles_hash() { return EmptyRLPListSHA3(); } +dev::bytes BlockHeaderData::serializeForDB() const { return util::rlp_enc(*this); } + +RLP_FIELDS_DEFINE(BlockHeaderData, parent_hash, state_root, transactions_root, receipts_root, log_bloom, gas_used, + total_reward) + +BlockHeader::BlockHeader(std::string&& raw_header_data) + : 
BlockHeaderData(util::rlp_dec(dev::RLP(raw_header_data))) {} + +BlockHeader::BlockHeader(std::string&& raw_header_data_, const PbftBlock& pbft_, uint64_t gas_limit_) + : BlockHeader(std::move(raw_header_data_)) { + setFromPbft(pbft_); + gas_limit = gas_limit_; + hash = dev::sha3(ethereumRlp()); +} + +void BlockHeader::setFromPbft(const PbftBlock& pbft) { + author = pbft.getBeneficiary(); + timestamp = pbft.getTimestamp(); + number = pbft.getPeriod(); + extra_data = pbft.getExtraDataRlp(); +} + +h256 const& BlockHeader::unclesHash() { return EmptyRLPListSHA3(); } const Nonce& BlockHeader::nonce() { return EmptyNonce(); } const u256& BlockHeader::difficulty() { return ZeroU256(); } -const h256& BlockHeader::mix_hash() { return ZeroHash(); } +const h256& BlockHeader::mixHash() { return ZeroHash(); } -std::shared_ptr BlockHeader::from_rlp(const dev::RLP& rlp) { +std::shared_ptr BlockHeader::fromRLP(const dev::RLP& rlp) { auto ret = std::make_shared(); ret->rlp(rlp); dev::RLPStream encoding; - ret->ethereum_rlp(encoding); + ret->ethereumRlp(encoding); ret->size = encoding.out().size(); return ret; } +void BlockHeader::ethereumRlp(dev::RLPStream& encoding) const { + util::rlp_tuple(encoding, parent_hash, BlockHeader::unclesHash(), author, state_root, transactions_root, + receipts_root, log_bloom, BlockHeader::difficulty(), number, gas_limit, gas_used, timestamp, + extra_data, BlockHeader::mixHash(), BlockHeader::nonce()); +} + +dev::bytes BlockHeader::ethereumRlp() const { + dev::RLPStream encoding; + ethereumRlp(encoding); + return encoding.invalidate(); +} + // TODO[2888]: remove hash field to not store it in the db RLP_FIELDS_DEFINE(BlockHeader, hash, parent_hash, author, state_root, transactions_root, receipts_root, log_bloom, number, gas_limit, gas_used, timestamp, total_reward, extra_data) -void BlockHeader::ethereum_rlp(dev::RLPStream& encoding) const { - util::rlp_tuple(encoding, parent_hash, BlockHeader::uncles_hash(), author, state_root, transactions_root, - 
receipts_root, log_bloom, BlockHeader::difficulty(), number, gas_limit, gas_used, timestamp, - extra_data, BlockHeader::mix_hash(), BlockHeader::nonce()); -} - RLP_FIELDS_DEFINE(LogEntry, address, topics, data) LogBloom LogEntry::bloom() const { diff --git a/libraries/core_libs/consensus/src/final_chain/final_chai_impl.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp similarity index 51% rename from libraries/core_libs/consensus/src/final_chain/final_chai_impl.cpp rename to libraries/core_libs/consensus/src/final_chain/final_chain.cpp index 6e623da3d1..0c493ec2c4 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chai_impl.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -1,14 +1,18 @@ +#include "final_chain/final_chain.hpp" + #include "common/encoding_solidity.hpp" -#include "final_chain/final_chain_impl.hpp" +#include "common/util.hpp" +#include "final_chain/state_api_data.hpp" #include "final_chain/trie_common.hpp" +#include "pbft/pbft_block.hpp" +#include "transaction/system_transaction.hpp" namespace taraxa::final_chain { - -FinalChainImpl::FinalChainImpl(const std::shared_ptr& db, const taraxa::FullNodeConfig& config, - const addr_t& node_addr) +FinalChain::FinalChain(const std::shared_ptr& db, const taraxa::FullNodeConfig& config, + const addr_t& node_addr) : db_(db), kBlockGasLimit(config.genesis.pbft.gas_limit), - state_api_([this](auto n) { return block_hash(n).value_or(ZeroHash()); }, // + state_api_([this](auto n) { return blockHash(n).value_or(ZeroHash()); }, // config.genesis.state, config.opts_final_chain, { db->stateDbStoragePath().string(), @@ -17,13 +21,13 @@ FinalChainImpl::FinalChainImpl(const std::shared_ptr& db, const taraxa::Full kMaxLevelsPerPeriod(config.max_levels_per_period), rewards_( config.genesis.pbft.committee_size, config.genesis.state.hardforks, db_, - [this](EthBlockNumber n) { return dpos_eligible_total_vote_count(n); }, + [this](EthBlockNumber n) { return 
dposEligibleTotalVoteCount(n); }, state_api_.get_last_committed_state_descriptor().blk_num), - block_headers_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return get_block_header(blk); }), - block_hashes_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return get_block_hash(blk); }), - transactions_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return get_transactions(blk); }), + block_headers_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return getBlockHeader(blk); }), + block_hashes_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return getBlockHash(blk); }), + transactions_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return getTransactions(blk); }), transaction_hashes_cache_(config.final_chain_cache_in_blocks, - [this](uint64_t blk) { return get_transaction_hashes(blk); }), + [this](uint64_t blk) { return getTransactionHashes(blk); }), accounts_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk, const addr_t& addr) { return state_api_.get_account(blk, addr); }), total_vote_count_cache_(config.final_chain_cache_in_blocks, @@ -34,17 +38,17 @@ FinalChainImpl::FinalChainImpl(const std::shared_ptr& db, const taraxa::Full dpos_is_eligible_cache_( config.final_chain_cache_in_blocks, [this](uint64_t blk, const addr_t& addr) { return state_api_.dpos_is_eligible(blk, addr); }), - kHardforksConfig(config.genesis.state.hardforks) { + kConfig(config) { LOG_OBJECTS_CREATE("EXECUTOR"); num_executed_dag_blk_ = db_->getStatusField(taraxa::StatusDbField::ExecutedBlkCount); num_executed_trx_ = db_->getStatusField(taraxa::StatusDbField::ExecutedTrxCount); auto state_db_descriptor = state_api_.get_last_committed_state_descriptor(); - auto last_blk_num = db_->lookup_int(DBMetaKeys::LAST_NUMBER, DB::Columns::final_chain_meta); + auto last_blk_num = db_->lookup_int(DBMetaKeys::LAST_NUMBER, DbStorage::Columns::final_chain_meta); // If we don't have genesis block 
in db then create and push it if (!last_blk_num) [[unlikely]] { auto batch = db_->createWriteBatch(); - auto header = append_block(batch, addr_t(), config.genesis.dag_genesis_block.getTimestamp(), kBlockGasLimit, - state_db_descriptor.state_root, u256(0)); + auto header = makeGenesisHeader(state_db_descriptor.state_root); + appendBlock(batch, header, {}, {}); block_headers_cache_.append(header->number, header); last_block_number_ = header->number; @@ -54,20 +58,19 @@ FinalChainImpl::FinalChainImpl(const std::shared_ptr& db, const taraxa::Full if (*last_blk_num != state_db_descriptor.blk_num) [[unlikely]] { auto batch = db_->createWriteBatch(); for (auto block_n = *last_blk_num; block_n != state_db_descriptor.blk_num; --block_n) { - auto raw_period_data = db_->getPeriodDataRaw(block_n); - assert(raw_period_data.size() > 0); + auto period_data = db_->getPeriodData(block_n); + assert(period_data.has_value()); - const PeriodData period_data(std::move(raw_period_data)); - if (period_data.transactions.size()) { - num_executed_dag_blk_ -= period_data.dag_blocks.size(); - num_executed_trx_ -= period_data.transactions.size(); + if (period_data->transactions.size()) { + num_executed_dag_blk_ -= period_data->dag_blocks.size(); + num_executed_trx_ -= period_data->transactions.size(); } auto period_system_transactions = db_->getPeriodSystemTransactionsHashes(block_n); num_executed_trx_ -= period_system_transactions.size(); } - db_->insert(batch, DB::Columns::status, StatusDbField::ExecutedBlkCount, num_executed_dag_blk_.load()); - db_->insert(batch, DB::Columns::status, StatusDbField::ExecutedTrxCount, num_executed_trx_.load()); - db_->insert(batch, DB::Columns::final_chain_meta, DBMetaKeys::LAST_NUMBER, state_db_descriptor.blk_num); + db_->insert(batch, DbStorage::Columns::status, StatusDbField::ExecutedBlkCount, num_executed_dag_blk_.load()); + db_->insert(batch, DbStorage::Columns::status, StatusDbField::ExecutedTrxCount, num_executed_trx_.load()); + db_->insert(batch, 
DbStorage::Columns::final_chain_meta, DBMetaKeys::LAST_NUMBER, state_db_descriptor.blk_num); db_->commitWriteBatch(batch); last_blk_num = state_db_descriptor.blk_num; } @@ -92,9 +95,9 @@ FinalChainImpl::FinalChainImpl(const std::shared_ptr& db, const taraxa::Full } } -void FinalChainImpl::stop() { executor_thread_.join(); } +void FinalChain::stop() { executor_thread_.join(); } -std::future> FinalChainImpl::finalize( +std::future> FinalChain::finalize( PeriodData&& new_blk, std::vector&& finalized_dag_blk_hashes, std::shared_ptr&& anchor) { auto p = std::make_shared>>(); boost::asio::post(executor_thread_, [this, new_blk = std::move(new_blk), @@ -106,35 +109,37 @@ std::future> FinalChainImpl::finalize( return p->get_future(); } -EthBlockNumber FinalChainImpl::delegation_delay() const { return delegation_delay_; } +EthBlockNumber FinalChain::delegationDelay() const { return delegation_delay_; } -SharedTransaction FinalChainImpl::make_bridge_finalization_transaction() { +SharedTransaction FinalChain::makeBridgeFinalizationTransaction() { const static auto finalize_method = util::EncodingSolidity::packFunctionCall("finalizeEpoch()"); - auto account = get_account(kTaraxaSystemAccount).value_or(state_api::ZeroAccount); + auto account = getAccount(kTaraxaSystemAccount).value_or(state_api::ZeroAccount); auto trx = std::make_shared(account.nonce, 0, 0, kBlockGasLimit, finalize_method, - kHardforksConfig.ficus_hf.bridge_contract_address); + kConfig.genesis.state.hardforks.ficus_hf.bridge_contract_address); return trx; } -bool FinalChainImpl::isNeedToFinalize(EthBlockNumber blk_num) const { +bool FinalChain::isNeedToFinalize(EthBlockNumber blk_num) const { const static auto get_bridge_root_method = util::EncodingSolidity::packFunctionCall("shouldFinalizeEpoch()"); - return u256(call(state_api::EVMTransaction{dev::ZeroAddress, 1, kHardforksConfig.ficus_hf.bridge_contract_address, + return u256(call(state_api::EVMTransaction{dev::ZeroAddress, 1, + 
kConfig.genesis.state.hardforks.ficus_hf.bridge_contract_address, state_api::ZeroAccount.nonce, 0, 10000000, get_bridge_root_method}, blk_num) .code_retval) .convert_to(); } -std::vector FinalChainImpl::makeSystemTransactions(PbftPeriod blk_num) { +std::vector FinalChain::makeSystemTransactions(PbftPeriod blk_num) { std::vector system_transactions; - // Make system transactions blocks sooner than next pillar block period, - // e.g.: if pillar block period is 100, this will return true for period 100 - delegation_delay() == 95, 195, 295, + // Make system transactions blocks sooner than next pillar block period, + // e.g.: if pillar block period is 100, this will return true for period 100 - delegationDelay() == 95, 195, 295, // etc... - if (kHardforksConfig.ficus_hf.isPillarBlockPeriod(blk_num + delegation_delay())) { - if (const auto bridge_contract = get_account(kHardforksConfig.ficus_hf.bridge_contract_address); bridge_contract) { + if (kConfig.genesis.state.hardforks.ficus_hf.isPillarBlockPeriod(blk_num + delegationDelay())) { + if (const auto bridge_contract = getAccount(kConfig.genesis.state.hardforks.ficus_hf.bridge_contract_address); + bridge_contract) { if (bridge_contract->code_size && isNeedToFinalize(blk_num - 1)) { - auto finalize_trx = make_bridge_finalization_transaction(); + auto finalize_trx = makeBridgeFinalizationTransaction(); system_transactions.push_back(finalize_trx); } } @@ -143,12 +148,12 @@ std::vector FinalChainImpl::makeSystemTransactions(PbftPeriod return system_transactions; } -std::shared_ptr FinalChainImpl::finalize_(PeriodData&& new_blk, - std::vector&& finalized_dag_blk_hashes, - std::shared_ptr&& anchor) { +std::shared_ptr FinalChain::finalize_(PeriodData&& new_blk, + std::vector&& finalized_dag_blk_hashes, + std::shared_ptr&& anchor) { auto batch = db_->createWriteBatch(); - block_applying_emitter_.emit(block_header()->number + 1); + block_applying_emitter_.emit(blockHeader()->number + 1); /* // Any dag block producer producing 
duplicate dag blocks on same level should be slashed @@ -172,7 +177,7 @@ std::shared_ptr FinalChainImpl::finalize_(PeriodData&& auto all_transactions = new_blk.transactions; all_transactions.insert(all_transactions.end(), system_transactions.begin(), system_transactions.end()); std::vector evm_trxs; - append_evm_transactions(evm_trxs, all_transactions); + appendEvmTransactions(evm_trxs, all_transactions); const auto& [exec_results] = state_api_.execute_transactions( {new_blk.pbft_blk->getBeneficiary(), kBlockGasLimit, new_blk.pbft_blk->getTimestamp(), BlockHeader::difficulty()}, @@ -202,16 +207,14 @@ std::shared_ptr FinalChainImpl::finalize_(PeriodData&& auto rewards_stats = rewards_.processStats(new_blk, transactions_gas_used, batch); const auto& [state_root, total_reward] = state_api_.distribute_rewards(rewards_stats); - auto blk_header = - append_block(batch, new_blk.pbft_blk->getBeneficiary(), new_blk.pbft_blk->getTimestamp(), kBlockGasLimit, - state_root, total_reward, all_transactions, receipts, new_blk.pbft_blk->getExtraDataRlp()); + auto blk_header = appendBlock(batch, *new_blk.pbft_blk, state_root, total_reward, all_transactions, receipts); // Update number of executed DAG blocks and transactions auto num_executed_dag_blk = num_executed_dag_blk_ + finalized_dag_blk_hashes.size(); auto num_executed_trx = num_executed_trx_ + all_transactions.size(); if (!finalized_dag_blk_hashes.empty()) { - db_->insert(batch, DB::Columns::status, StatusDbField::ExecutedBlkCount, num_executed_dag_blk); - db_->insert(batch, DB::Columns::status, StatusDbField::ExecutedTrxCount, num_executed_trx); + db_->insert(batch, DbStorage::Columns::status, StatusDbField::ExecutedBlkCount, num_executed_dag_blk); + db_->insert(batch, DbStorage::Columns::status, StatusDbField::ExecutedTrxCount, num_executed_trx); LOG(log_nf_) << "Executed dag blocks #" << num_executed_dag_blk_ - finalized_dag_blk_hashes.size() << "-" << num_executed_dag_blk_ - 1 << " , Transactions count: " << 
all_transactions.size(); } @@ -268,52 +271,58 @@ std::shared_ptr FinalChainImpl::finalize_(PeriodData&& return result; } -void FinalChainImpl::prune(EthBlockNumber blk_n) { +void FinalChain::prune(EthBlockNumber blk_n) { LOG(log_nf_) << "Pruning data older than " << blk_n; - auto last_block_to_keep = get_block_header(blk_n); + auto last_block_to_keep = getBlockHeader(blk_n); if (last_block_to_keep) { auto block_to_keep = last_block_to_keep; std::vector state_root_to_keep; while (block_to_keep) { state_root_to_keep.push_back(block_to_keep->state_root); - block_to_keep = get_block_header(block_to_keep->number + 1); + block_to_keep = getBlockHeader(block_to_keep->number + 1); } - auto block_to_prune = get_block_header(last_block_to_keep->number - 1); + auto block_to_prune = getBlockHeader(last_block_to_keep->number - 1); while (block_to_prune && block_to_prune->number > 0) { - db_->remove(DB::Columns::final_chain_blk_by_number, block_to_prune->number); - db_->remove(DB::Columns::final_chain_blk_hash_by_number, block_to_prune->number); - db_->remove(DB::Columns::final_chain_blk_number_by_hash, block_to_prune->hash); - block_to_prune = get_block_header(block_to_prune->number - 1); + db_->remove(DbStorage::Columns::final_chain_blk_by_number, block_to_prune->number); + db_->remove(DbStorage::Columns::final_chain_blk_hash_by_number, block_to_prune->number); + db_->remove(DbStorage::Columns::final_chain_blk_number_by_hash, block_to_prune->hash); + block_to_prune = getBlockHeader(block_to_prune->number - 1); } - db_->compactColumn(DB::Columns::final_chain_blk_by_number); - db_->compactColumn(DB::Columns::final_chain_blk_hash_by_number); - db_->compactColumn(DB::Columns::final_chain_blk_number_by_hash); + db_->compactColumn(DbStorage::Columns::final_chain_blk_by_number); + db_->compactColumn(DbStorage::Columns::final_chain_blk_hash_by_number); + db_->compactColumn(DbStorage::Columns::final_chain_blk_number_by_hash); state_api_.prune(state_root_to_keep, 
last_block_to_keep->number); } } -std::shared_ptr FinalChainImpl::append_block(DB::Batch& batch, const addr_t& author, uint64_t timestamp, - uint64_t gas_limit, const h256& state_root, u256 total_reward, - const SharedTransactions& transactions, - const TransactionReceipts& receipts, - const bytes& extra_data) { - auto blk_header_ptr = std::make_shared(); - auto& blk_header = *blk_header_ptr; - auto last_block = block_header(); - blk_header.number = last_block ? last_block->number + 1 : 0; - blk_header.parent_hash = last_block ? last_block->hash : h256(); - blk_header.author = author; - blk_header.timestamp = timestamp; - blk_header.state_root = state_root; - blk_header.gas_used = receipts.empty() ? 0 : receipts.back().cumulative_gas_used; - blk_header.gas_limit = gas_limit; - blk_header.total_reward = total_reward; - blk_header.extra_data = extra_data; +std::shared_ptr FinalChain::appendBlock(Batch& batch, const PbftBlock& pbft_blk, const h256& state_root, + u256 total_reward, const SharedTransactions& transactions, + const TransactionReceipts& receipts) { + auto header = std::make_shared(); + header->setFromPbft(pbft_blk); + + if (auto last_block = blockHeader(); last_block) { + header->number = last_block->number + 1; + header->parent_hash = last_block->hash; + } + if (!receipts.empty()) { + header->gas_used = receipts.back().cumulative_gas_used; + } + header->state_root = state_root; + header->total_reward = total_reward; + header->gas_limit = kBlockGasLimit; + + return appendBlock(batch, std::move(header), transactions, receipts); +} + +std::shared_ptr FinalChain::appendBlock(Batch& batch, std::shared_ptr header, + const SharedTransactions& transactions, + const TransactionReceipts& receipts) { dev::BytesMap trxs_trie, receipts_trie; dev::RLPStream rlp_strm; - auto trx_idx = 0; + size_t trx_idx = 0; for (; trx_idx < transactions.size(); ++trx_idx) { const auto& trx = transactions[trx_idx]; auto i_rlp = util::rlp_enc(rlp_strm, trx_idx); @@ -321,53 +330,57 @@ 
std::shared_ptr FinalChainImpl::append_block(DB::Batch& batch, cons const auto& receipt = receipts[trx_idx]; receipts_trie[i_rlp] = util::rlp_enc(rlp_strm, receipt); - db_->insert(batch, DB::Columns::final_chain_receipt_by_trx_hash, trx->getHash(), rlp_strm.out()); + db_->insert(batch, DbStorage::Columns::final_chain_receipt_by_trx_hash, trx->getHash(), rlp_strm.out()); - blk_header.log_bloom |= receipt.bloom(); + header->log_bloom |= receipt.bloom(); } - blk_header.transactions_root = hash256(trxs_trie); - blk_header.receipts_root = hash256(receipts_trie); - rlp_strm.clear(), blk_header.ethereum_rlp(rlp_strm); - blk_header.hash = dev::sha3(rlp_strm.out()); - db_->insert(batch, DB::Columns::final_chain_blk_by_number, blk_header.number, util::rlp_enc(rlp_strm, blk_header)); - auto log_bloom_for_index = blk_header.log_bloom; - log_bloom_for_index.shiftBloom<3>(sha3(blk_header.author.ref())); - for (uint64_t level = 0, index = blk_header.number; level < c_bloomIndexLevels; ++level, index /= c_bloomIndexSize) { - auto chunk_id = block_blooms_chunk_id(level, index / c_bloomIndexSize); - auto chunk_to_alter = block_blooms(chunk_id); + + header->transactions_root = hash256(trxs_trie); + header->receipts_root = hash256(receipts_trie); + header->hash = dev::sha3(header->ethereumRlp()); + + auto data = header->serializeForDB(); + db_->insert(batch, DbStorage::Columns::final_chain_blk_by_number, header->number, data); + + auto log_bloom_for_index = header->log_bloom; + log_bloom_for_index.shiftBloom<3>(sha3(header->author.ref())); + for (uint64_t level = 0, index = header->number; level < c_bloomIndexLevels; ++level, index /= c_bloomIndexSize) { + auto chunk_id = blockBloomsChunkId(level, index / c_bloomIndexSize); + auto chunk_to_alter = blockBlooms(chunk_id); chunk_to_alter[index % c_bloomIndexSize] |= log_bloom_for_index; - db_->insert(batch, DB::Columns::final_chain_log_blooms_index, chunk_id, util::rlp_enc(rlp_strm, chunk_to_alter)); + db_->insert(batch, 
DbStorage::Columns::final_chain_log_blooms_index, chunk_id, + util::rlp_enc(rlp_strm, chunk_to_alter)); } - db_->insert(batch, DB::Columns::final_chain_blk_hash_by_number, blk_header.number, blk_header.hash); - db_->insert(batch, DB::Columns::final_chain_blk_number_by_hash, blk_header.hash, blk_header.number); - db_->insert(batch, DB::Columns::final_chain_meta, DBMetaKeys::LAST_NUMBER, blk_header.number); + db_->insert(batch, DbStorage::Columns::final_chain_blk_hash_by_number, header->number, header->hash); + db_->insert(batch, DbStorage::Columns::final_chain_blk_number_by_hash, header->hash, header->number); + db_->insert(batch, DbStorage::Columns::final_chain_meta, DBMetaKeys::LAST_NUMBER, header->number); - return blk_header_ptr; + return header; } -EthBlockNumber FinalChainImpl::last_block_number() const { return last_block_number_; } +EthBlockNumber FinalChain::lastBlockNumber() const { return last_block_number_; } -std::optional FinalChainImpl::block_number(const h256& h) const { - return db_->lookup_int(h, DB::Columns::final_chain_blk_number_by_hash); +std::optional FinalChain::blockNumber(const h256& h) const { + return db_->lookup_int(h, DbStorage::Columns::final_chain_blk_number_by_hash); } -std::optional FinalChainImpl::block_hash(std::optional n) const { - return block_hashes_cache_.get(last_if_absent(n)); +std::optional FinalChain::blockHash(std::optional n) const { + return block_hashes_cache_.get(lastIfAbsent(n)); } -std::shared_ptr FinalChainImpl::block_header(std::optional n) const { +std::shared_ptr FinalChain::blockHeader(std::optional n) const { if (!n) { return block_headers_cache_.last(); } return block_headers_cache_.get(*n); } -std::optional FinalChainImpl::transaction_location(const h256& trx_hash) const { +std::optional FinalChain::transactionLocation(const h256& trx_hash) const { return db_->getTransactionLocation(trx_hash); } -std::optional FinalChainImpl::transaction_receipt(const h256& trx_h) const { - auto raw = db_->lookup(trx_h, 
DB::Columns::final_chain_receipt_by_trx_hash); +std::optional FinalChain::transactionReceipt(const h256& trx_h) const { + auto raw = db_->lookup(trx_h, DbStorage::Columns::final_chain_receipt_by_trx_hash); if (raw.empty()) { return {}; } @@ -376,20 +389,20 @@ std::optional FinalChainImpl::transaction_receipt(const h256 return ret; } -uint64_t FinalChainImpl::transactionCount(std::optional n) const { - return db_->getTransactionCount(last_if_absent(n)); +uint64_t FinalChain::transactionCount(std::optional n) const { + return db_->getTransactionCount(lastIfAbsent(n)); } -std::shared_ptr FinalChainImpl::transaction_hashes(std::optional n) const { - return transaction_hashes_cache_.get(last_if_absent(n)); +std::shared_ptr FinalChain::transactionHashes(std::optional n) const { + return transaction_hashes_cache_.get(lastIfAbsent(n)); } -const SharedTransactions FinalChainImpl::transactions(std::optional n) const { - return transactions_cache_.get(last_if_absent(n)); +const SharedTransactions FinalChain::transactions(std::optional n) const { + return transactions_cache_.get(lastIfAbsent(n)); } -std::vector FinalChainImpl::withBlockBloom(const LogBloom& b, EthBlockNumber from, - EthBlockNumber to) const { +std::vector FinalChain::withBlockBloom(const LogBloom& b, EthBlockNumber from, + EthBlockNumber to) const { std::vector ret; // start from the top-level auto u = int_pow(c_bloomIndexSize, c_bloomIndexLevels); @@ -400,28 +413,27 @@ std::vector FinalChainImpl::withBlockBloom(const LogBloom& b, Et return ret; } -std::optional FinalChainImpl::get_account(const addr_t& addr, - std::optional blk_n) const { - return accounts_cache_.get(last_if_absent(blk_n), addr); +std::optional FinalChain::getAccount(const addr_t& addr, + std::optional blk_n) const { + return accounts_cache_.get(lastIfAbsent(blk_n), addr); } -void FinalChainImpl::update_state_config(const state_api::Config& new_config) { +void FinalChain::updateStateConfig(const state_api::Config& new_config) { 
delegation_delay_ = new_config.dpos.delegation_delay; state_api_.update_state_config(new_config); } -h256 FinalChainImpl::get_account_storage(const addr_t& addr, const u256& key, - std::optional blk_n) const { - return state_api_.get_account_storage(last_if_absent(blk_n), addr, key); +h256 FinalChain::getAccountStorage(const addr_t& addr, const u256& key, std::optional blk_n) const { + return state_api_.get_account_storage(lastIfAbsent(blk_n), addr, key); } -bytes FinalChainImpl::get_code(const addr_t& addr, std::optional blk_n) const { - return state_api_.get_code_by_address(last_if_absent(blk_n), addr); +bytes FinalChain::getCode(const addr_t& addr, std::optional blk_n) const { + return state_api_.get_code_by_address(lastIfAbsent(blk_n), addr); } -state_api::ExecutionResult FinalChainImpl::call(const state_api::EVMTransaction& trx, - std::optional blk_n) const { - auto const blk_header = block_header(last_if_absent(blk_n)); +state_api::ExecutionResult FinalChain::call(const state_api::EVMTransaction& trx, + std::optional blk_n) const { + auto const blk_header = blockHeader(lastIfAbsent(blk_n)); if (!blk_header) { throw std::runtime_error("Future block"); } @@ -435,9 +447,10 @@ state_api::ExecutionResult FinalChainImpl::call(const state_api::EVMTransaction& trx); } -std::string FinalChainImpl::trace(std::vector trxs, EthBlockNumber blk_n, - std::optional params) const { - const auto blk_header = block_header(last_if_absent(blk_n)); +std::string FinalChain::trace(std::vector state_trxs, + std::vector trxs, EthBlockNumber blk_n, + std::optional params) const { + const auto blk_header = blockHeader(lastIfAbsent(blk_n)); if (!blk_header) { throw std::runtime_error("Future block"); } @@ -448,64 +461,73 @@ std::string FinalChainImpl::trace(std::vector trxs, E blk_header->timestamp, BlockHeader::difficulty(), }, - trxs, params)); + state_trxs, trxs, params)); } -uint64_t FinalChainImpl::dpos_eligible_total_vote_count(EthBlockNumber blk_num) const { +uint64_t 
FinalChain::dposEligibleTotalVoteCount(EthBlockNumber blk_num) const { return total_vote_count_cache_.get(blk_num); } -uint64_t FinalChainImpl::dpos_eligible_vote_count(EthBlockNumber blk_num, const addr_t& addr) const { +uint64_t FinalChain::dposEligibleVoteCount(EthBlockNumber blk_num, const addr_t& addr) const { return dpos_vote_count_cache_.get(blk_num, addr); } -bool FinalChainImpl::dpos_is_eligible(EthBlockNumber blk_num, const addr_t& addr) const { +bool FinalChain::dposIsEligible(EthBlockNumber blk_num, const addr_t& addr) const { return dpos_is_eligible_cache_.get(blk_num, addr); } -vrf_wrapper::vrf_pk_t FinalChainImpl::dpos_get_vrf_key(EthBlockNumber blk_n, const addr_t& addr) const { +vrf_wrapper::vrf_pk_t FinalChain::dposGetVrfKey(EthBlockNumber blk_n, const addr_t& addr) const { return state_api_.dpos_get_vrf_key(blk_n, addr); } -std::vector FinalChainImpl::dpos_validators_total_stakes(EthBlockNumber blk_num) const { +std::vector FinalChain::dposValidatorsTotalStakes(EthBlockNumber blk_num) const { return state_api_.dpos_validators_total_stakes(blk_num); } -uint256_t FinalChainImpl::dpos_total_amount_delegated(EthBlockNumber blk_num) const { +uint256_t FinalChain::dposTotalAmountDelegated(EthBlockNumber blk_num) const { return state_api_.dpos_total_amount_delegated(blk_num); } -std::vector FinalChainImpl::dpos_validators_vote_counts(EthBlockNumber blk_num) const { +std::vector FinalChain::dposValidatorsVoteCounts(EthBlockNumber blk_num) const { return state_api_.dpos_validators_vote_counts(blk_num); } -void FinalChainImpl::wait_for_finalized() { +void FinalChain::waitForFinalized() { std::unique_lock lck(finalized_mtx_); finalized_cv_.wait_for(lck, std::chrono::milliseconds(10)); } -uint64_t FinalChainImpl::dpos_yield(EthBlockNumber blk_num) const { return state_api_.dpos_yield(blk_num); } +uint64_t FinalChain::dposYield(EthBlockNumber blk_num) const { return state_api_.dpos_yield(blk_num); } -u256 FinalChainImpl::dpos_total_supply(EthBlockNumber 
blk_num) const { return state_api_.dpos_total_supply(blk_num); } +u256 FinalChain::dposTotalSupply(EthBlockNumber blk_num) const { return state_api_.dpos_total_supply(blk_num); } -h256 FinalChainImpl::get_bridge_root(EthBlockNumber blk_num) const { +h256 FinalChain::getBridgeRoot(EthBlockNumber blk_num) const { const static auto get_bridge_root_method = util::EncodingSolidity::packFunctionCall("getBridgeRoot()"); - return h256(call(state_api::EVMTransaction{dev::ZeroAddress, 1, kHardforksConfig.ficus_hf.bridge_contract_address, + return h256(call(state_api::EVMTransaction{dev::ZeroAddress, 1, + kConfig.genesis.state.hardforks.ficus_hf.bridge_contract_address, state_api::ZeroAccount.nonce, 0, 10000000, get_bridge_root_method}, blk_num) .code_retval); } -h256 FinalChainImpl::get_bridge_epoch(EthBlockNumber blk_num) const { - const static auto get_bridge_epoch_method = util::EncodingSolidity::packFunctionCall("finalizedEpoch()"); - return h256(call(state_api::EVMTransaction{dev::ZeroAddress, 1, kHardforksConfig.ficus_hf.bridge_contract_address, - state_api::ZeroAccount.nonce, 0, 10000000, get_bridge_epoch_method}, +h256 FinalChain::getBridgeEpoch(EthBlockNumber blk_num) const { + const static auto getBridgeEpoch_method = util::EncodingSolidity::packFunctionCall("finalizedEpoch()"); + return h256(call(state_api::EVMTransaction{dev::ZeroAddress, 1, + kConfig.genesis.state.hardforks.ficus_hf.bridge_contract_address, + state_api::ZeroAccount.nonce, 0, 10000000, getBridgeEpoch_method}, blk_num) .code_retval); } -std::shared_ptr FinalChainImpl::get_transaction_hashes(std::optional n) const { - const auto& trxs = db_->getPeriodTransactions(last_if_absent(n)); +std::pair FinalChain::getBalance(addr_t const& addr) const { + if (auto acc = getAccount(addr)) { + return {acc->balance, true}; + } + return {0, false}; +} + +std::shared_ptr FinalChain::getTransactionHashes(std::optional n) const { + const auto& trxs = db_->getPeriodTransactions(lastIfAbsent(n)); auto ret = 
std::make_shared(); if (!trxs) { return ret; @@ -516,64 +538,104 @@ std::shared_ptr FinalChainImpl::get_transaction_hashes(std::o return ret; } -const SharedTransactions FinalChainImpl::get_transactions(std::optional n) const { - if (auto trxs = db_->getPeriodTransactions(last_if_absent(n))) { +const SharedTransactions FinalChain::getTransactions(std::optional n) const { + if (auto trxs = db_->getPeriodTransactions(lastIfAbsent(n))) { return *trxs; } return {}; } -std::shared_ptr FinalChainImpl::get_block_header(EthBlockNumber n) const { - if (auto raw = db_->lookup(n, DB::Columns::final_chain_blk_by_number); !raw.empty()) { - return BlockHeader::from_rlp(dev::RLP(raw)); +std::shared_ptr FinalChain::makeGenesisHeader(std::string&& raw_header) const { + auto bh = std::make_shared(std::move(raw_header)); + bh->gas_limit = kConfig.genesis.pbft.gas_limit; + bh->timestamp = kConfig.genesis.dag_genesis_block.getTimestamp(); + bh->hash = dev::sha3(bh->ethereumRlp()); + return bh; +} + +std::shared_ptr FinalChain::makeGenesisHeader(const h256& state_root) const { + auto header = std::make_shared(); + header->timestamp = kConfig.genesis.dag_genesis_block.getTimestamp(); + header->state_root = state_root; + header->gas_limit = kConfig.genesis.pbft.gas_limit; + header->hash = dev::sha3(header->ethereumRlp()); + return header; +} + +std::shared_ptr FinalChain::getBlockHeader(EthBlockNumber n) const { + if (auto raw = db_->lookup(n, DbStorage::Columns::final_chain_blk_by_number); !raw.empty()) { + if (n == 0) { + return makeGenesisHeader(std::move(raw)); + } + auto pbft = db_->getPbftBlock(n); + // we should usually have a pbft block for a final chain block + if (!pbft) { + return {}; + } + return std::make_shared(std::move(raw), *pbft, kBlockGasLimit); } return {}; } -std::optional FinalChainImpl::get_block_hash(EthBlockNumber n) const { - auto raw = db_->lookup(n, DB::Columns::final_chain_blk_hash_by_number); +std::optional FinalChain::finalChainHash(EthBlockNumber n) const { 
+ auto delay = delegationDelay(); + if (n <= delay) { + // first delegation delay blocks will have zero hash + return ZeroHash(); + } + auto header = blockHeader(n - delay); + if (!header) { + return {}; + } + + if (kConfig.genesis.state.hardforks.isOnCornusHardfork(n)) { + return header->hash; + } + return header->state_root; +} + +std::optional FinalChain::getBlockHash(EthBlockNumber n) const { + auto raw = db_->lookup(n, DbStorage::Columns::final_chain_blk_hash_by_number); if (raw.empty()) { return {}; } return h256(raw, h256::FromBinary); } -EthBlockNumber FinalChainImpl::last_if_absent(const std::optional& client_blk_n) const { - return client_blk_n ? *client_blk_n : last_block_number(); +EthBlockNumber FinalChain::lastIfAbsent(const std::optional& client_blk_n) const { + return client_blk_n ? *client_blk_n : lastBlockNumber(); } -state_api::EVMTransaction FinalChainImpl::to_evm_transaction(const SharedTransaction& trx) { +state_api::EVMTransaction FinalChain::toEvmTransaction(const SharedTransaction& trx) { return state_api::EVMTransaction{ trx->getSender(), trx->getGasPrice(), trx->getReceiver(), trx->getNonce(), trx->getValue(), trx->getGas(), trx->getData(), }; } -void FinalChainImpl::append_evm_transactions(std::vector& evm_trxs, - const SharedTransactions& trxs) { +void FinalChain::appendEvmTransactions(std::vector& evm_trxs, + const SharedTransactions& trxs) { std::transform(trxs.cbegin(), trxs.cend(), std::back_inserter(evm_trxs), - [](const auto& trx) { return to_evm_transaction(trx); }); + [](const auto& trx) { return toEvmTransaction(trx); }); } -BlocksBlooms FinalChainImpl::block_blooms(const h256& chunk_id) const { - if (auto raw = db_->lookup(chunk_id, DB::Columns::final_chain_log_blooms_index); !raw.empty()) { +BlocksBlooms FinalChain::blockBlooms(const h256& chunk_id) const { + if (auto raw = db_->lookup(chunk_id, DbStorage::Columns::final_chain_log_blooms_index); !raw.empty()) { return dev::RLP(raw).toArray(); } return {}; } -h256 
FinalChainImpl::block_blooms_chunk_id(EthBlockNumber level, EthBlockNumber index) { - return h256(index * 0xff + level); -} +h256 FinalChain::blockBloomsChunkId(EthBlockNumber level, EthBlockNumber index) { return h256(index * 0xff + level); } -std::vector FinalChainImpl::withBlockBloom(const LogBloom& b, EthBlockNumber from, EthBlockNumber to, - EthBlockNumber level, EthBlockNumber index) const { +std::vector FinalChain::withBlockBloom(const LogBloom& b, EthBlockNumber from, EthBlockNumber to, + EthBlockNumber level, EthBlockNumber index) const { std::vector ret; auto uCourse = int_pow(c_bloomIndexSize, level + 1); auto uFine = int_pow(c_bloomIndexSize, level); auto obegin = index == from / uCourse ? from / uFine % c_bloomIndexSize : 0; auto oend = index == to / uCourse ? (to / uFine) % c_bloomIndexSize + 1 : c_bloomIndexSize; - auto bb = block_blooms(block_blooms_chunk_id(level, index)); + auto bb = blockBlooms(blockBloomsChunkId(level, index)); for (auto o = obegin; o < oend; ++o) { if (bb[o].contains(b)) { // This level has something like what we want. 
diff --git a/libraries/core_libs/consensus/src/final_chain/state_api.cpp b/libraries/core_libs/consensus/src/final_chain/state_api.cpp index eba9cf45dc..1ed4ea86f4 100644 --- a/libraries/core_libs/consensus/src/final_chain/state_api.cpp +++ b/libraries/core_libs/consensus/src/final_chain/state_api.cpp @@ -7,6 +7,7 @@ #include #include "common/encoding_rlp.hpp" +#include "config/state_config.hpp" static_assert(sizeof(char) == sizeof(uint8_t)); @@ -163,10 +164,10 @@ ExecutionResult StateAPI::dry_run_transaction(EthBlockNumber blk_num, const EVMB trx); } -bytes StateAPI::trace(EthBlockNumber blk_num, const EVMBlock& blk, const std::vector trxs, - std::optional params) const { - return c_method_args_rlp(this_c_, blk_num, blk, trxs, - params); +bytes StateAPI::trace(EthBlockNumber blk_num, const EVMBlock& blk, const std::vector& state_trxs, + const std::vector& trxs, std::optional params) const { + return c_method_args_rlp(this_c_, blk_num, blk, state_trxs, + trxs, params); } StateDescriptor StateAPI::get_last_committed_state_descriptor() const { diff --git a/libraries/core_libs/consensus/src/key_manager/key_manager.cpp b/libraries/core_libs/consensus/src/key_manager/key_manager.cpp index 24d7eaa948..ad968d4a6c 100644 --- a/libraries/core_libs/consensus/src/key_manager/key_manager.cpp +++ b/libraries/core_libs/consensus/src/key_manager/key_manager.cpp @@ -4,7 +4,7 @@ namespace taraxa { static const vrf_wrapper::vrf_pk_t kEmptyVrfKey; -KeyManager::KeyManager(std::shared_ptr final_chain) : final_chain_(std::move(final_chain)) {} +KeyManager::KeyManager(std::shared_ptr final_chain) : final_chain_(std::move(final_chain)) {} std::shared_ptr KeyManager::getVrfKey(EthBlockNumber blk_n, const addr_t& addr) { { @@ -15,7 +15,7 @@ std::shared_ptr KeyManager::getVrfKey(EthBlockNumber blk_ } try { - if (auto key = final_chain_->dpos_get_vrf_key(blk_n, addr); key != kEmptyVrfKey) { + if (auto key = final_chain_->dposGetVrfKey(blk_n, addr); key != kEmptyVrfKey) { std::unique_lock 
lock(vrf_keys_mutex_); return vrf_keys_.insert_or_assign(addr, std::make_shared(std::move(key))).first->second; } diff --git a/libraries/core_libs/consensus/src/pbft/pbft_chain.cpp b/libraries/core_libs/consensus/src/pbft/pbft_chain.cpp index f728b85673..b34fd7cdaa 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_chain.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_chain.cpp @@ -2,7 +2,6 @@ #include -#include "common/jsoncpp.hpp" #include "pbft/pbft_manager.hpp" using namespace std; diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index 9d413f5916..178c3428b0 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -8,10 +8,8 @@ #include "config/version.hpp" #include "dag/dag.hpp" +#include "dag/dag_manager.hpp" #include "final_chain/final_chain.hpp" -#include "network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp" -#include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" -#include "network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp" #include "pbft/period_data.hpp" #include "pillar_chain/pillar_chain_manager.hpp" #include "vote_manager/vote_manager.hpp" @@ -22,11 +20,11 @@ using namespace std::chrono_literals; constexpr std::chrono::milliseconds kPollingIntervalMs{100}; constexpr PbftStep kMaxSteps{13}; // Need to be a odd number -PbftManager::PbftManager(const GenesisConfig &conf, addr_t node_addr, std::shared_ptr db, +PbftManager::PbftManager(const FullNodeConfig &conf, std::shared_ptr db, std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, std::shared_ptr dag_mgr, std::shared_ptr trx_mgr, - std::shared_ptr final_chain, - std::shared_ptr pillar_chain_mgr, secret_t node_sk) + std::shared_ptr final_chain, + std::shared_ptr pillar_chain_mgr) : db_(std::move(db)), pbft_chain_(std::move(pbft_chain)), vote_mgr_(std::move(vote_mgr)), @@ -34,52 +32,62 
@@ PbftManager::PbftManager(const GenesisConfig &conf, addr_t node_addr, std::share trx_mgr_(std::move(trx_mgr)), final_chain_(std::move(final_chain)), pillar_chain_mgr_(std::move(pillar_chain_mgr)), - node_addr_(std::move(node_addr)), - node_sk_(std::move(node_sk)), - kMinLambda(conf.pbft.lambda_ms), - dag_genesis_block_hash_(conf.dag_genesis_block.getHash()), - kGenesisConfig(conf), + node_addr_(dev::toAddress(conf.node_secret)), + node_sk_(conf.node_secret), + kMinLambda(conf.genesis.pbft.lambda_ms), + dag_genesis_block_hash_(conf.genesis.dag_genesis_block.getHash()), + kGenesisConfig(conf.genesis), proposed_blocks_(db_) { + const auto &node_addr = node_addr_; LOG_OBJECTS_CREATE("PBFT_MGR"); - for (auto period = final_chain_->last_block_number() + 1, curr_period = pbft_chain_->getPbftChainSize(); + auto current_pbft_period = pbft_chain_->getPbftChainSize(); + if (kGenesisConfig.state.hardforks.ficus_hf.isPillarBlockPeriod(current_pbft_period)) { + const auto current_pillar_block = pillar_chain_mgr_->getCurrentPillarBlock(); + // There is a race condition where pbt block could have been saved and node stopped before saving pillar block + if (current_pbft_period == + current_pillar_block->getPeriod() + kGenesisConfig.state.hardforks.ficus_hf.pillar_blocks_interval) + LOG(log_er_) << "Pillar block was not processed before restart, current period: " << current_pbft_period + << ", current pillar block period: " << current_pillar_block->getPeriod(); + processPillarBlock(current_pbft_period); + } + + for (auto period = final_chain_->lastBlockNumber() + 1, curr_period = pbft_chain_->getPbftChainSize(); period <= curr_period; ++period) { - auto period_raw = db_->getPeriodDataRaw(period); - if (period_raw.size() == 0) { + auto period_data = db_->getPeriodData(period); + if (!period_data.has_value()) { LOG(log_er_) << "DB corrupted - Cannot find PBFT block in period " << period << " in PBFT chain DB pbft_blocks."; assert(false); } - PeriodData period_data(period_raw); - if 
(period_data.pbft_blk->getPeriod() != period) { - LOG(log_er_) << "DB corrupted - PBFT block hash " << period_data.pbft_blk->getBlockHash() - << " has different period " << period_data.pbft_blk->getPeriod() + if (period_data->pbft_blk->getPeriod() != period) { + LOG(log_er_) << "DB corrupted - PBFT block hash " << period_data->pbft_blk->getBlockHash() + << " has different period " << period_data->pbft_blk->getPeriod() << " in block data than in block order db: " << period; assert(false); } // We need this section because votes need to be verified for reward distribution - for (const auto &v : period_data.previous_block_cert_votes) { + for (const auto &v : period_data->previous_block_cert_votes) { vote_mgr_->validateVote(v); } - finalize_(std::move(period_data), db_->getFinalizedDagBlockHashesByPeriod(period), period == curr_period); + finalize_(std::move(*period_data), db_->getFinalizedDagBlockHashesByPeriod(period), period == curr_period); } PbftPeriod start_period = 1; const auto recently_finalized_transactions_periods = - kRecentlyFinalizedTransactionsFactor * final_chain_->delegation_delay(); + kRecentlyFinalizedTransactionsFactor * final_chain_->delegationDelay(); if (pbft_chain_->getPbftChainSize() > recently_finalized_transactions_periods) { start_period = pbft_chain_->getPbftChainSize() - recently_finalized_transactions_periods; } for (PbftPeriod period = start_period; period <= pbft_chain_->getPbftChainSize(); period++) { - auto period_raw = db_->getPeriodDataRaw(period); - if (period_raw.size() == 0) { + auto period_data = db_->getPeriodData(period); + if (!period_data.has_value()) { LOG(log_er_) << "DB corrupted - Cannot find PBFT block in period " << period << " in PBFT chain DB pbft_blocks."; assert(false); } - PeriodData period_data(period_raw); - trx_mgr_->initializeRecentlyFinalizedTransactions(period_data); + trx_mgr_->initializeRecentlyFinalizedTransactions(*period_data); } // Initialize PBFT status @@ -201,7 +209,7 @@ void 
PbftManager::setPbftRound(PbftRound round) { void PbftManager::waitForPeriodFinalization() { do { // we need to be sure we finalized at least block block with num lower by delegation_delay - if (pbft_chain_->getPbftChainSize() <= final_chain_->last_block_number() + final_chain_->delegation_delay()) { + if (pbft_chain_->getPbftChainSize() <= final_chain_->lastBlockNumber() + final_chain_->delegationDelay()) { break; } thisThreadSleepForMilliSeconds(kPollingIntervalMs.count()); @@ -210,11 +218,11 @@ void PbftManager::waitForPeriodFinalization() { std::optional PbftManager::getCurrentDposTotalVotesCount() const { try { - return final_chain_->dpos_eligible_total_vote_count(pbft_chain_->getPbftChainSize()); + return final_chain_->dposEligibleTotalVoteCount(pbft_chain_->getPbftChainSize()); } catch (state_api::ErrFutureBlock &e) { LOG(log_wr_) << "Unable to get CurrentDposTotalVotesCount for period: " << pbft_chain_->getPbftChainSize() - << ". Period is too far ahead of actual finalized pbft chain size (" - << final_chain_->last_block_number() << "). Err msg: " << e.what(); + << ". Period is too far ahead of actual finalized pbft chain size (" << final_chain_->lastBlockNumber() + << "). Err msg: " << e.what(); } return {}; @@ -222,11 +230,11 @@ std::optional PbftManager::getCurrentDposTotalVotesCount() const { std::optional PbftManager::getCurrentNodeVotesCount() const { try { - return final_chain_->dpos_eligible_vote_count(pbft_chain_->getPbftChainSize(), node_addr_); + return final_chain_->dposEligibleVoteCount(pbft_chain_->getPbftChainSize(), node_addr_); } catch (state_api::ErrFutureBlock &e) { LOG(log_wr_) << "Unable to get CurrentNodeVotesCount for period: " << pbft_chain_->getPbftChainSize() - << ". Period is too far ahead of actual finalized pbft chain size (" - << final_chain_->last_block_number() << "). Err msg: " << e.what(); + << ". Period is too far ahead of actual finalized pbft chain size (" << final_chain_->lastBlockNumber() + << "). 
Err msg: " << e.what(); } return {}; @@ -455,6 +463,11 @@ void PbftManager::initialState() { proposed_blocks_.pushProposedPbftBlock(block, false); } + // TODO[2840]: remove this check if case nodes do not log the err messages after restart + if (const auto &err_msg = proposed_blocks_.checkOldBlocksPresence(current_pbft_period); err_msg.has_value()) { + LOG(log_er_) << "Old proposed blocks saved in db -> : " << *err_msg; + } + // Process saved cert voted block from db if (auto cert_voted_block_data = db_->getCertVotedBlockInRound(); cert_voted_block_data.has_value()) { const auto [cert_voted_block_round, cert_voted_block] = *cert_voted_block_data; @@ -631,7 +644,7 @@ void PbftManager::broadcastVotes() { } } -void PbftManager::testBroadcatVotesFunctionality() { +void PbftManager::testBroadcastVotesFunctionality() { // Set these variables to force broadcastVotes() send votes current_round_start_datetime_ = time_point{}; current_period_start_datetime_ = time_point{}; @@ -801,6 +814,17 @@ bool PbftManager::genAndPlaceProposeVote(const std::shared_ptr &propo } void PbftManager::gossipNewVote(const std::shared_ptr &vote, const std::shared_ptr &voted_block) { + gossipVote(vote, voted_block); + + auto found_voted_block_it = current_round_broadcasted_votes_.find(vote->getBlockHash()); + if (found_voted_block_it == current_round_broadcasted_votes_.end()) { + found_voted_block_it = current_round_broadcasted_votes_.insert({vote->getBlockHash(), {}}).first; + } + + found_voted_block_it->second.emplace_back(vote->getStep()); +} + +void PbftManager::gossipVote(const std::shared_ptr &vote, const std::shared_ptr &voted_block) { assert(!voted_block || vote->getBlockHash() == voted_block->getBlockHash()); auto net = network_.lock(); @@ -811,13 +835,6 @@ void PbftManager::gossipNewVote(const std::shared_ptr &vote, const std } net->gossipVote(vote, voted_block); - - auto found_voted_block_it = current_round_broadcasted_votes_.find(vote->getBlockHash()); - if (found_voted_block_it == 
current_round_broadcasted_votes_.end()) { - found_voted_block_it = current_round_broadcasted_votes_.insert({vote->getBlockHash(), {}}).first; - } - - found_voted_block_it->second.emplace_back(vote->getStep()); } void PbftManager::proposeBlock_() { @@ -1125,18 +1142,15 @@ PbftManager::generatePbftBlock(PbftPeriod propose_period, const blk_hash_t &prev std::transform(reward_votes.begin(), reward_votes.end(), std::back_inserter(reward_votes_hashes), [](const auto &v) { return v->getHash(); }); - h256 last_state_root; - if (propose_period > final_chain_->delegation_delay()) { - if (const auto header = final_chain_->block_header(propose_period - final_chain_->delegation_delay())) { - last_state_root = header->state_root; - } else { - LOG(log_wr_) << "Block for period " << propose_period << " could not be proposed as we are behind"; - return {}; - } + auto final_chain_hash = final_chain_->finalChainHash(propose_period); + if (!final_chain_hash) { + LOG(log_wr_) << "Block for period " << propose_period << " could not be proposed as we are behind"; + return {}; } try { - auto block = std::make_shared(prev_blk_hash, anchor_hash, order_hash, last_state_root, propose_period, - node_addr_, node_sk_, std::move(reward_votes_hashes), extra_data); + auto block = + std::make_shared(prev_blk_hash, anchor_hash, order_hash, final_chain_hash.value(), propose_period, + node_addr_, node_sk_, std::move(reward_votes_hashes), extra_data); return {std::make_pair(std::move(block), std::move(reward_votes))}; } catch (const std::exception &e) { @@ -1166,14 +1180,14 @@ blk_hash_t PbftManager::calculateOrderHash(const std::vector &dag_bl return dev::sha3(order_stream.out()); } -blk_hash_t PbftManager::calculateOrderHash(const std::vector &dag_blocks) { +blk_hash_t PbftManager::calculateOrderHash(const std::vector> &dag_blocks) { if (dag_blocks.empty()) { return kNullBlockHash; } dev::RLPStream order_stream(1); order_stream.appendList(dag_blocks.size()); for (auto const &blk : dag_blocks) { - 
order_stream << blk.getHash(); + order_stream << blk->getHash(); } return dev::sha3(order_stream.out()); } @@ -1273,7 +1287,8 @@ PbftManager::proposePbftBlock() { } const auto &dag_block_weight = dag_blk->getGasEstimation(); - if (total_weight + dag_block_weight > kGenesisConfig.pbft.gas_limit) { + const auto [dag_gas_limit, pbft_gas_limit] = kGenesisConfig.getGasLimits(current_pbft_period); + if (total_weight + dag_block_weight > pbft_gas_limit) { break; } total_weight += dag_block_weight; @@ -1382,25 +1397,21 @@ std::shared_ptr PbftManager::identifyLeaderBlock_(PbftRound round, Pb return empty_leader_block; } -PbftStateRootValidation PbftManager::validatePbftBlockStateRoot(const std::shared_ptr &pbft_block) const { - auto period = pbft_block->getPeriod(); - auto const &pbft_block_hash = pbft_block->getBlockHash(); - { - h256 prev_state_root_hash; - if (period > final_chain_->delegation_delay()) { - if (const auto header = final_chain_->block_header(period - final_chain_->delegation_delay())) { - prev_state_root_hash = header->state_root; - } else { - LOG(log_wr_) << "Block " << pbft_block_hash << " could not be validated as we are behind"; - return PbftStateRootValidation::Missing; - } - } - if (pbft_block->getPrevStateRoot() != prev_state_root_hash) { - LOG(log_er_) << "Block " << pbft_block_hash << " state root " << pbft_block->getPrevStateRoot() - << " isn't matching actual " << prev_state_root_hash; - return PbftStateRootValidation::Invalid; - } +PbftStateRootValidation PbftManager::validateFinalChainHash(const std::shared_ptr &pbft_block) const { + const auto period = pbft_block->getPeriod(); + const auto &pbft_block_hash = pbft_block->getBlockHash(); + + auto prev_final_chain_hash = final_chain_->finalChainHash(period); + if (!prev_final_chain_hash) { + LOG(log_wr_) << "Block " << pbft_block_hash << " could not be validated as we are behind"; + return PbftStateRootValidation::Missing; } + if (pbft_block->getFinalChainHash() != prev_final_chain_hash) { + 
LOG(log_er_) << "Block " << period << " hash " << pbft_block_hash << " state root " + << pbft_block->getFinalChainHash() << " isn't matching actual " << prev_final_chain_hash.value(); + return PbftStateRootValidation::Invalid; + } + return PbftStateRootValidation::Valid; } @@ -1473,7 +1484,7 @@ bool PbftManager::validatePbftBlock(const std::shared_ptr &pbft_block auto const &pbft_block_hash = pbft_block->getBlockHash(); - if (validatePbftBlockStateRoot(pbft_block) != PbftStateRootValidation::Valid) { + if (validateFinalChainHash(pbft_block) != PbftStateRootValidation::Valid) { return false; } @@ -1535,7 +1546,7 @@ bool PbftManager::validatePbftBlock(const std::shared_ptr &pbft_block for (auto const &dag_blk_hash : dag_blocks_order) { auto dag_block = dag_mgr_->getDagBlock(dag_blk_hash); assert(dag_block); - anchor_dag_block_order_cache_[anchor_hash].emplace_back(std::move(*dag_block)); + anchor_dag_block_order_cache_[anchor_hash].emplace_back(std::move(dag_block)); } auto last_pbft_block_hash = pbft_chain_->getLastPbftBlockHash(); @@ -1543,7 +1554,7 @@ bool PbftManager::validatePbftBlock(const std::shared_ptr &pbft_block auto prev_pbft_block = pbft_chain_->getPbftBlockInChain(last_pbft_block_hash); auto ghost = dag_mgr_->getGhostPath(prev_pbft_block.getPivotDagBlockHash()); if (ghost.size() > 1 && anchor_hash != ghost[1]) { - if (!checkBlockWeight(anchor_dag_block_order_cache_[anchor_hash])) { + if (!checkBlockWeight(anchor_dag_block_order_cache_[anchor_hash], block_period)) { LOG(log_er_) << "PBFT block " << pbft_block_hash << " weight exceeded max limit"; anchor_dag_block_order_cache_.erase(anchor_hash); return false; @@ -1565,7 +1576,7 @@ bool PbftManager::pushCertVotedPbftBlockIntoChain_(const std::shared_ptr transactions_to_query; period_data.dag_blocks.reserve(dag_order_it->second.size()); for (const auto &dag_blk : dag_order_it->second) { - for (const auto &trx_hash : dag_blk.getTrxs()) { + for (const auto &trx_hash : dag_blk->getTrxs()) { if 
(trx_set.insert(trx_hash).second) { transactions_to_query.emplace_back(trx_hash); } @@ -1736,7 +1747,7 @@ bool PbftManager::pushPbftBlock_(PeriodData &&period_data, std::vectorgetHash(); }); // We need to reorder transactions before saving them reorderTransactions(period_data.transactions); @@ -1796,16 +1807,16 @@ bool PbftManager::pushPbftBlock_(PeriodData &&period_data, std::vectordelegation_delay(), e.g. block with period 32 + // Pillar block use state from current_pbft_chain_size - final_chain_->delegationDelay(), e.g. block with period 32 // uses state from period 27. - PbftPeriod request_period = current_pbft_chain_size - final_chain_->delegation_delay(); + PbftPeriod request_period = current_pbft_chain_size - final_chain_->delegationDelay(); // advancePeriod() -> resetConsensus() -> waitForPeriodFinalization() makes sure block request_period was already // finalized - assert(final_chain_->last_block_number() >= request_period); + assert(final_chain_->lastBlockNumber() >= request_period); - const auto block_header = final_chain_->block_header(request_period); - const auto bridge_root = final_chain_->get_bridge_root(request_period); - const auto bridge_epoch = final_chain_->get_bridge_epoch(request_period); + const auto block_header = final_chain_->blockHeader(request_period); + const auto bridge_root = final_chain_->getBridgeRoot(request_period); + const auto bridge_epoch = final_chain_->getBridgeEpoch(request_period); // Create pillar block const auto pillar_block = @@ -1814,7 +1825,7 @@ void PbftManager::processPillarBlock(PbftPeriod current_pbft_chain_size) { // Optimization - creates pillar vote right after pillar block was created, otherwise pillar votes are created during // next period pbft voting Check if node is eligible to vote for pillar block No need to catch ErrFutureBlock, // waitForPeriodFinalization() makes sure it does not happen - if (final_chain_->dpos_is_eligible(current_pbft_chain_size, node_addr_)) { + if 
(final_chain_->dposIsEligible(current_pbft_chain_size, node_addr_)) { if (pillar_block) { // Pillar votes are created in the next period (+ 1), this is optimization to create & broadcast it a bit faster const auto pillar_vote = pillar_chain_mgr_->genAndPlacePillarVote( @@ -1869,11 +1880,11 @@ std::optional>>> Pbf bool retry_logged = false; while (true) { - auto validation_result = validatePbftBlockStateRoot(period_data.pbft_blk); + auto validation_result = validateFinalChainHash(period_data.pbft_blk); if (validation_result != PbftStateRootValidation::Missing) { if (validation_result == PbftStateRootValidation::Invalid) { LOG(log_er_) << "Failed verifying block " << pbft_block_hash - << " with invalid state root: " << period_data.pbft_blk->getPrevStateRoot() + << " with invalid state root: " << period_data.pbft_blk->getFinalChainHash() << ". Disconnect malicious peer " << node_id.abridged(); sync_queue_.clear(); net->handleMaliciousSyncPeer(node_id); @@ -1882,7 +1893,7 @@ std::optional>>> Pbf break; } // If syncing and pbft manager is faster than execution a delay might be needed to allow EVM to catch up - final_chain_->wait_for_finalized(); + final_chain_->waitForFinalized(); if (!retry_logged) { LOG(log_wr_) << "PBFT block " << pbft_block_hash << " validation delayed, state root missing, execution is behind"; @@ -1920,7 +1931,7 @@ std::optional>>> Pbf std::unordered_set trx_set; std::vector transactions_to_query; for (auto const &dag_block : period_data.dag_blocks) { - for (auto const &trx_hash : dag_block.getTrxs()) { + for (auto const &trx_hash : dag_block->getTrxs()) { if (trx_set.insert(trx_hash).second) { transactions_to_query.emplace_back(trx_hash); } @@ -2112,11 +2123,11 @@ bool PbftManager::validatePbftBlockPillarVotes(const PeriodData &period_data) co bool PbftManager::canParticipateInConsensus(PbftPeriod period) const { try { - return final_chain_->dpos_is_eligible(period, node_addr_); + return final_chain_->dposIsEligible(period, node_addr_); } catch 
(state_api::ErrFutureBlock &e) { LOG(log_er_) << "Unable to decide if node is consensus node or not for period: " << period - << ". Period is too far ahead of actual finalized pbft chain size (" - << final_chain_->last_block_number() << "). Err msg: " << e.what() + << ". Period is too far ahead of actual finalized pbft chain size (" << final_chain_->lastBlockNumber() + << "). Err msg: " << e.what() << ". Node is considered as not eligible to participate in consensus for period " << period; } @@ -2145,11 +2156,14 @@ void PbftManager::periodDataQueuePush(PeriodData &&period_data, dev::p2p::NodeID size_t PbftManager::periodDataQueueSize() const { return sync_queue_.size(); } -bool PbftManager::checkBlockWeight(const std::vector &dag_blocks) const { +bool PbftManager::checkBlockWeight(const std::vector> &dag_blocks, PbftPeriod period) const { const u256 total_weight = std::accumulate(dag_blocks.begin(), dag_blocks.end(), u256(0), - [](u256 value, const auto &dag_block) { return value + dag_block.getGasEstimation(); }); - if (total_weight > kGenesisConfig.pbft.gas_limit) { + [](u256 value, const auto &dag_block) { return value + dag_block->getGasEstimation(); }); + auto pbft_gas_limit = kGenesisConfig.state.hardforks.isOnCornusHardfork(period) + ? 
kGenesisConfig.state.hardforks.cornus_hf.pbft_gas_limit + : kGenesisConfig.pbft.gas_limit; + if (total_weight > pbft_gas_limit) { return false; } return true; diff --git a/libraries/core_libs/consensus/src/pbft/proposed_blocks.cpp b/libraries/core_libs/consensus/src/pbft/proposed_blocks.cpp index fa3aec043e..814914a068 100644 --- a/libraries/core_libs/consensus/src/pbft/proposed_blocks.cpp +++ b/libraries/core_libs/consensus/src/pbft/proposed_blocks.cpp @@ -81,4 +81,14 @@ void ProposedBlocks::cleanupProposedPbftBlocksByPeriod(PbftPeriod period) { } } +std::optional ProposedBlocks::checkOldBlocksPresence(PbftPeriod current_period) const { + std::string msg; + for (auto period_it = proposed_blocks_.begin(); + period_it != proposed_blocks_.end() && period_it->first < current_period; period_it++) { + msg += std::to_string(period_it->first) + " -> " + std::to_string(period_it->second.size()) + ". "; + } + + return msg.empty() ? std::nullopt : std::make_optional(msg); +} + } // namespace taraxa diff --git a/libraries/core_libs/consensus/src/pillar_chain/pillar_block.cpp b/libraries/core_libs/consensus/src/pillar_chain/pillar_block.cpp index bcd0c359db..c35c896389 100644 --- a/libraries/core_libs/consensus/src/pillar_chain/pillar_block.cpp +++ b/libraries/core_libs/consensus/src/pillar_chain/pillar_block.cpp @@ -1,5 +1,6 @@ #include "pillar_chain/pillar_block.hpp" +#include #include #include "common/encoding_rlp.hpp" diff --git a/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp b/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp index a822c50539..932ad31a08 100644 --- a/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp +++ b/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp @@ -2,6 +2,7 @@ #include +#include "config/hardfork.hpp" #include "final_chain/final_chain.hpp" #include "key_manager/key_manager.hpp" #include "network/network.hpp" @@ -52,7 +53,7 @@ std::shared_ptr 
PillarChainManager::createPillarBlock( PbftPeriod period, const std::shared_ptr& block_header, const h256& bridge_root, const h256& bridge_epoch) { blk_hash_t previous_pillar_block_hash{}; // null block hash - auto new_vote_counts = final_chain_->dpos_validators_vote_counts(period); + auto new_vote_counts = final_chain_->dposValidatorsVoteCounts(period); std::vector votes_count_changes; // First ever pillar block @@ -105,14 +106,12 @@ std::shared_ptr PillarChainManager::createPillarBlock( return pillar_block; } -void PillarChainManager::saveNewPillarBlock(std::shared_ptr pillar_block, +void PillarChainManager::saveNewPillarBlock(const std::shared_ptr& pillar_block, std::vector&& new_vote_counts) { - CurrentPillarBlockDataDb data{std::move(pillar_block), std::move(new_vote_counts)}; - db_->saveCurrentPillarBlockData(data); - std::scoped_lock lock(mutex_); - current_pillar_block_ = std::move(data.pillar_block); - current_pillar_block_vote_counts_ = std::move(data.vote_counts); + db_->saveCurrentPillarBlockData({pillar_block, new_vote_counts}); + current_pillar_block_ = pillar_block; + current_pillar_block_vote_counts_ = std::move(new_vote_counts); } std::shared_ptr PillarChainManager::genAndPlacePillarVote(PbftPeriod period, @@ -257,7 +256,7 @@ bool PillarChainManager::validatePillarVote(const std::shared_ptr vo // Check if signer is eligible validator try { - if (!final_chain_->dpos_is_eligible(period - 1, validator)) { + if (!final_chain_->dposIsEligible(period - 1, validator)) { LOG(log_er_) << "Validator is not eligible. 
Pillar vote " << vote->getHash(); return false; } @@ -278,7 +277,7 @@ bool PillarChainManager::validatePillarVote(const std::shared_ptr vo uint64_t PillarChainManager::addVerifiedPillarVote(const std::shared_ptr& vote) { uint64_t validator_vote_count = 0; try { - validator_vote_count = final_chain_->dpos_eligible_vote_count(vote->getPeriod() - 1, vote->getVoterAddr()); + validator_vote_count = final_chain_->dposEligibleVoteCount(vote->getPeriod() - 1, vote->getVoterAddr()); } catch (state_api::ErrFutureBlock& e) { LOG(log_er_) << "Pillar vote " << vote->getHash() << " with period " << vote->getPeriod() << " is too far ahead of DPOS. " << e.what(); @@ -332,7 +331,6 @@ bool PillarChainManager::isValidPillarBlock(const std::shared_ptr& } const auto last_finalized_pillar_block = getLastFinalizedPillarBlock(); - std::shared_lock lock(mutex_); assert(last_finalized_pillar_block); // Check if some block was not skipped @@ -352,7 +350,7 @@ std::optional PillarChainManager::getPillarConsensusThreshold(PbftPeri try { // Pillar chain consensus threshold = total votes count / 2 + 1 - threshold = final_chain_->dpos_eligible_total_vote_count(period) / 2 + 1; + threshold = final_chain_->dposEligibleTotalVoteCount(period) / 2 + 1; } catch (state_api::ErrFutureBlock& e) { LOG(log_er_) << "Unable to get dpos total votes count for period " << period << " to calculate pillar consensus threshold: " << e.what(); diff --git a/libraries/core_libs/consensus/src/pillar_chain/pillar_votes.cpp b/libraries/core_libs/consensus/src/pillar_chain/pillar_votes.cpp index f60a49dc9e..4d2bed6a4b 100644 --- a/libraries/core_libs/consensus/src/pillar_chain/pillar_votes.cpp +++ b/libraries/core_libs/consensus/src/pillar_chain/pillar_votes.cpp @@ -52,14 +52,45 @@ std::vector> PillarVotes::getVerifiedVotes(PbftPerio return {}; } - if (above_threshold && found_pillar_block_votes->second.weight < found_period_votes->second.threshold) { - return {}; + // Return minimum amount of >threshold sorted votes based 
on their weight + if (above_threshold) { + const auto threshold = found_period_votes->second.threshold; + if (found_pillar_block_votes->second.weight < threshold) { + return {}; + } + + // Sort votes using multiset + auto customComparator = [](const std::pair, uint64_t>& a, + const std::pair, uint64_t>& b) { + return a.second > b.second; + }; + std::multiset, uint64_t>, decltype(customComparator)> votes_set( + customComparator); + std::transform(found_pillar_block_votes->second.votes.begin(), found_pillar_block_votes->second.votes.end(), + std::inserter(votes_set, votes_set.end()), [](const auto& el) { return el.second; }); + + // Move minimum amount of > threshold votes with the highest vote counts + std::vector> sorted_votes; + sorted_votes.reserve(votes_set.size()); + uint64_t tmp_votes_count = 0; + for (auto it = votes_set.begin(); it != votes_set.end();) { + auto&& vote_pair = votes_set.extract(it++); + tmp_votes_count += vote_pair.value().second; + sorted_votes.push_back(std::move(vote_pair.value().first)); + + if (tmp_votes_count >= threshold) { + break; + } + } + + return sorted_votes; } + // Return all votes std::vector> votes; votes.reserve(found_pillar_block_votes->second.votes.size()); for (const auto& sig : found_pillar_block_votes->second.votes) { - votes.push_back(sig.second); + votes.push_back(sig.second.first); } return votes; @@ -70,7 +101,7 @@ bool PillarVotes::periodDataInitialized(PbftPeriod period) const { return votes_.contains(period); } -bool PillarVotes::addVerifiedVote(const std::shared_ptr& vote, u_int64_t validator_vote_count) { +bool PillarVotes::addVerifiedVote(const std::shared_ptr& vote, uint64_t validator_vote_count) { std::scoped_lock lock(mutex_); auto found_period_votes = votes_.find(vote->getPeriod()); @@ -92,7 +123,7 @@ bool PillarVotes::addVerifiedVote(const std::shared_ptr& vote, u_int auto pillar_block_votes = found_period_votes->second.pillar_block_votes.insert({vote->getBlockHash(), {}}).first; // Add validator vote count 
only if the vote is new - if (pillar_block_votes->second.votes.emplace(vote->getHash(), vote).second) { + if (pillar_block_votes->second.votes.emplace(vote->getHash(), std::make_pair(vote, validator_vote_count)).second) { pillar_block_votes->second.weight += validator_vote_count; } @@ -101,7 +132,7 @@ bool PillarVotes::addVerifiedVote(const std::shared_ptr& vote, u_int void PillarVotes::initializePeriodData(PbftPeriod period, uint64_t threshold) { std::scoped_lock lock(mutex_); - votes_.insert({period, PeriodVotes{.threshold = threshold}}); + votes_.insert({period, PeriodVotes{{}, {}, threshold}}); } void PillarVotes::eraseVotes(PbftPeriod min_period) { diff --git a/libraries/core_libs/consensus/src/rewards/block_stats.cpp b/libraries/core_libs/consensus/src/rewards/block_stats.cpp index 643e098eee..caa02c32cb 100644 --- a/libraries/core_libs/consensus/src/rewards/block_stats.cpp +++ b/libraries/core_libs/consensus/src/rewards/block_stats.cpp @@ -86,9 +86,9 @@ void BlockStats::processStats(const PeriodData& block, const bool aspen_dag_rewa void BlockStats::processDagBlocks(const PeriodData& block) { auto block_transactions_hashes_ = toTrxHashesSet(block.transactions); for (const auto& dag_block : block.dag_blocks) { - const addr_t& dag_block_author = dag_block.getSender(); + const addr_t& dag_block_author = dag_block->getSender(); bool has_unique_transactions = false; - for (const auto& tx_hash : dag_block.getTrxs()) { + for (const auto& tx_hash : dag_block->getTrxs()) { // we should also check that we have transactions in pbft block(period data). 
Because in dag blocks could be // included transaction that was finalized in previous blocks if (!block_transactions_hashes_.contains(tx_hash)) { @@ -110,17 +110,17 @@ void BlockStats::processDagBlocks(const PeriodData& block) { void BlockStats::processDagBlocksAspen(const PeriodData& block) { uint16_t min_difficulty = UINT16_MAX; for (const auto& dag_block : block.dag_blocks) { - if (dag_block.getDifficulty() < min_difficulty) { - min_difficulty = dag_block.getDifficulty(); + if (dag_block->getDifficulty() < min_difficulty) { + min_difficulty = dag_block->getDifficulty(); } } for (const auto& dag_block : block.dag_blocks) { - const addr_t& dag_block_author = dag_block.getSender(); - if (dag_block.getDifficulty() == min_difficulty) { + const addr_t& dag_block_author = dag_block->getSender(); + if (dag_block->getDifficulty() == min_difficulty) { validators_stats_[dag_block_author].dag_blocks_count_ += 1; total_dag_blocks_count_ += 1; } - for (const auto& tx_hash : dag_block.getTrxs()) { + for (const auto& tx_hash : dag_block->getTrxs()) { addTransaction(tx_hash, dag_block_author); } } diff --git a/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp b/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp index b3bf47d6c6..f7c624603e 100644 --- a/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp +++ b/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp @@ -5,7 +5,7 @@ #include "storage/storage.hpp" namespace taraxa::rewards { -Stats::Stats(uint32_t committee_size, const HardforksConfig& hardforks, std::shared_ptr db, +Stats::Stats(uint32_t committee_size, const HardforksConfig& hardforks, std::shared_ptr db, std::function&& dpos_eligible_total_vote_count, EthBlockNumber last_blk_num) : kCommitteeSize(committee_size), kHardforksConfig(hardforks), @@ -19,7 +19,7 @@ void Stats::recoverFromDb(EthBlockNumber lastBlockNumber) { clear(lastBlockNumber); } - auto iterator = db_->getColumnIterator(DB::Columns::block_rewards_stats); + auto 
iterator = db_->getColumnIterator(DbStorage::Columns::block_rewards_stats); for (iterator->SeekToFirst(); iterator->Valid(); iterator->Next()) { PbftPeriod period; memcpy(&period, iterator->key().data(), sizeof(PbftPeriod)); @@ -27,10 +27,10 @@ void Stats::recoverFromDb(EthBlockNumber lastBlockNumber) { } } -void Stats::saveBlockStats(uint64_t period, const BlockStats& stats, DbStorage::Batch& write_batch) { +void Stats::saveBlockStats(uint64_t period, const BlockStats& stats, Batch& write_batch) { dev::RLPStream encoding; stats.rlp(encoding); - db_->insert(write_batch, DB::Columns::block_rewards_stats, period, encoding.out()); + db_->insert(write_batch, DbStorage::Columns::block_rewards_stats, period, encoding.out()); } uint32_t Stats::getCurrentDistributionFrequency(uint64_t current_block) const { @@ -47,7 +47,7 @@ void Stats::clear(uint64_t current_period) { if (frequency > 1 && current_period % frequency == 0) { // clear need to be called on vector because it was moved before blocks_stats_.clear(); - db_->deleteColumnData(DB::Columns::block_rewards_stats); + db_->deleteColumnData(DbStorage::Columns::block_rewards_stats); } } @@ -67,7 +67,7 @@ BlockStats Stats::getBlockStats(const PeriodData& blk, const std::vector& } std::vector Stats::processStats(const PeriodData& current_blk, const std::vector& trxs_gas_used, - DbStorage::Batch& write_batch) { + Batch& write_batch) { const auto current_period = current_blk.pbft_blk->getPeriod(); const auto frequency = getCurrentDistributionFrequency(current_period); auto block_stats = getBlockStats(current_blk, trxs_gas_used); diff --git a/libraries/core_libs/consensus/src/slashing_manager/slashing_manager.cpp b/libraries/core_libs/consensus/src/slashing_manager/slashing_manager.cpp index 309edeb93d..8aa4e2f531 100644 --- a/libraries/core_libs/consensus/src/slashing_manager/slashing_manager.cpp +++ b/libraries/core_libs/consensus/src/slashing_manager/slashing_manager.cpp @@ -2,22 +2,23 @@ #include 
"common/encoding_solidity.hpp" #include "common/types.hpp" +#include "config/config.hpp" #include "transaction/transaction_manager.hpp" +#include "vote/pbft_vote.hpp" namespace taraxa { const auto kContractAddress = addr_t("0x00000000000000000000000000000000000000EE"); -SlashingManager::SlashingManager(std::shared_ptr final_chain, - std::shared_ptr trx_manager, std::shared_ptr gas_pricer, - const FullNodeConfig &config, secret_t node_sk) +SlashingManager::SlashingManager(const FullNodeConfig &config, std::shared_ptr final_chain, + std::shared_ptr trx_manager, std::shared_ptr gas_pricer) : final_chain_(std::move(final_chain)), trx_manager_(std::move(trx_manager)), gas_pricer_(std::move(gas_pricer)), double_voting_proofs_(1000, 100), kConfig(config), - kAddress(toAddress(node_sk)), - kPrivateKey(std::move(node_sk)) {} + kAddress(toAddress(kConfig.node_secret)), + kPrivateKey(kConfig.node_secret) {} bool SlashingManager::submitDoubleVotingProof(const std::shared_ptr &vote_a, const std::shared_ptr &vote_b) { @@ -52,7 +53,7 @@ bool SlashingManager::submitDoubleVotingProof(const std::shared_ptr &v } // Check the balance - const auto account = final_chain_->get_account(kAddress).value_or(taraxa::state_api::ZeroAccount); + const auto account = final_chain_->getAccount(kAddress).value_or(taraxa::state_api::ZeroAccount); if (account.balance == 0) { return false; } diff --git a/libraries/core_libs/consensus/src/transaction/gas_pricer.cpp b/libraries/core_libs/consensus/src/transaction/gas_pricer.cpp index 84f2611f4d..0095715306 100644 --- a/libraries/core_libs/consensus/src/transaction/gas_pricer.cpp +++ b/libraries/core_libs/consensus/src/transaction/gas_pricer.cpp @@ -1,5 +1,7 @@ #include "transaction/gas_pricer.hpp" +#include "storage/storage.hpp" + namespace taraxa { GasPricer::GasPricer(const GasPriceConfig& config, bool is_light_node, std::shared_ptr db) @@ -25,7 +27,7 @@ u256 GasPricer::bid() const { void GasPricer::init(const std::shared_ptr& db) { const auto 
last_blk_num = - db->lookup_int(final_chain::DBMetaKeys::LAST_NUMBER, DB::Columns::final_chain_meta); + db->lookup_int(DBMetaKeys::LAST_NUMBER, DbStorage::Columns::final_chain_meta); if (!last_blk_num || *last_blk_num == 0) return; auto block_num = *last_blk_num; diff --git a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp index 64fac73911..e524a43c78 100644 --- a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp +++ b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp @@ -4,13 +4,13 @@ #include #include -#include "dag/dag.hpp" +#include "config/config.hpp" #include "logger/logger.hpp" #include "transaction/transaction.hpp" namespace taraxa { -TransactionManager::TransactionManager(FullNodeConfig const &conf, std::shared_ptr db, - std::shared_ptr final_chain, addr_t node_addr) +TransactionManager::TransactionManager(const FullNodeConfig &conf, std::shared_ptr db, + std::shared_ptr final_chain, addr_t node_addr) : kConf(conf), transactions_pool_(final_chain, kConf.transactions_pool_size), kDagBlockGasLimit(kConf.genesis.dag.gas_limit), @@ -121,27 +121,34 @@ TransactionStatus TransactionManager::insertValidatedTransaction(std::shared_ptr return TransactionStatus::Known; } - const auto account = final_chain_->get_account(tx->getSender()).value_or(taraxa::state_api::ZeroAccount); + const auto account = final_chain_->getAccount(tx->getSender()); bool proposable = true; - // Ensure the transaction adheres to nonce ordering - if (account.nonce > tx->getNonce()) { - if (!insert_non_proposable) { - return TransactionStatus::Known; + if (account.has_value()) { + // Ensure the transaction adheres to nonce ordering + if (account->nonce > tx->getNonce()) { + if (!insert_non_proposable) { + return TransactionStatus::Known; + } + proposable = false; } - proposable = false; - } - // Transactor should have enough funds to cover the costs - // cost == 
V + GP * GL - if (account.balance < tx->getCost()) { + // Transactor should have enough funds to cover the costs + // cost == V + GP * GL + if (account->balance < tx->getCost()) { + if (!insert_non_proposable) { + return TransactionStatus::Known; + } + proposable = false; + } + } else { if (!insert_non_proposable) { return TransactionStatus::Known; } proposable = false; } - const auto last_block_number = final_chain_->last_block_number(); + const auto last_block_number = final_chain_->lastBlockNumber(); LOG(log_dg_) << "Transaction " << trx_hash << " inserted in trx pool"; return transactions_pool_.insert(std::move(tx), proposable, last_block_number); } @@ -189,24 +196,18 @@ void TransactionManager::saveTransactionsFromDagBlock(SharedTransactions const & std::unique_lock transactions_lock(transactions_mutex_); for (auto t : trxs) { - const auto account = final_chain_->get_account(t->getSender()).value_or(taraxa::state_api::ZeroAccount); const auto tx_hash = t->getHash(); - // Cheacking nonce in cheaper than checking db, verify with nonce if possible - bool trx_not_executed = account.nonce < t->getNonce() || !db_->transactionFinalized(tx_hash); - - if (trx_not_executed) { - if (!recently_finalized_transactions_.contains(tx_hash) && - !nonfinalized_transactions_in_dag_.contains(tx_hash)) { - db_->addTransactionToBatch(*t, write_batch); - nonfinalized_transactions_in_dag_.emplace(tx_hash, t); - if (transactions_pool_.erase(tx_hash)) { - LOG(log_dg_) << "Transaction " << tx_hash << " removed from trx pool "; - // Transactions are counted when included in DAG - accepted_transactions.emplace_back(tx_hash); - } - trx_count_++; + if (!recently_finalized_transactions_.contains(tx_hash) && !nonfinalized_transactions_in_dag_.contains(tx_hash) && + !db_->transactionFinalized(tx_hash)) { + db_->addTransactionToBatch(*t, write_batch); + nonfinalized_transactions_in_dag_.emplace(tx_hash, t); + if (transactions_pool_.erase(tx_hash)) { + LOG(log_dg_) << "Transaction " << tx_hash << 
" removed from trx pool "; + // Transactions are counted when included in DAG + accepted_transactions.emplace_back(tx_hash); } + trx_count_++; } } db_->addStatusFieldToBatch(StatusDbField::TrxCount, trx_count_, write_batch); @@ -235,7 +236,7 @@ void TransactionManager::recoverNonfinalizedTransactions() { // line can be removed or replaced with an assert db_->removeTransactionToBatch(trx_hash, write_batch); } else { - // Cache sender now by caling getSender since getting sender later on proposing blocks can affect performance + // Cache sender now by calling getSender since getting sender later on proposing blocks can affect performance trxs[i]->getSender(); nonfinalized_transactions_in_dag_.emplace(trx_hash, std::move(trxs[i])); } @@ -253,9 +254,9 @@ bool TransactionManager::nonProposableTransactionsOverTheLimit() const { return transactions_pool_.nonProposableTransactionsOverTheLimit(); } -bool TransactionManager::isTransactionPoolFull(size_t precentage) const { +bool TransactionManager::isTransactionPoolFull(size_t percentage) const { std::shared_lock transactions_lock(transactions_mutex_); - return transactions_pool_.size() >= (kConf.transactions_pool_size * precentage / 100); + return transactions_pool_.size() >= (kConf.transactions_pool_size * percentage / 100); } size_t TransactionManager::getNonfinalizedTrxSize() const { @@ -336,7 +337,7 @@ void TransactionManager::initializeRecentlyFinalizedTransactions(const PeriodDat void TransactionManager::updateFinalizedTransactionsStatus(PeriodData const &period_data) { // !!! 
There is no lock because it is called under std::unique_lock trx_lock(trx_mgr_->getTransactionsMutex()); const auto recently_finalized_transactions_periods = - kRecentlyFinalizedTransactionsFactor * final_chain_->delegation_delay(); + kRecentlyFinalizedTransactionsFactor * final_chain_->delegationDelay(); if (period_data.transactions.size() > 0) { // Delete transactions older than recently_finalized_transactions_periods if (period_data.pbft_blk->getPeriod() > recently_finalized_transactions_periods) { @@ -419,7 +420,7 @@ SharedTransactions TransactionManager::getTransactions(const vec_trx_t &trxs_has for (auto trx : finalizedTransactions) { // Only include transactions with valid nonce at proposal period - auto acc = final_chain_->get_account(trx->getSender(), proposal_period); + auto acc = final_chain_->getAccount(trx->getSender(), proposal_period); if (acc.has_value() && acc->nonce > trx->getNonce()) { LOG(log_er_) << "Old transaction: " << trx->getHash(); } else { diff --git a/libraries/core_libs/consensus/src/transaction/transaction_queue.cpp b/libraries/core_libs/consensus/src/transaction/transaction_queue.cpp index 235acd14ba..ba7360f774 100644 --- a/libraries/core_libs/consensus/src/transaction/transaction_queue.cpp +++ b/libraries/core_libs/consensus/src/transaction/transaction_queue.cpp @@ -4,7 +4,7 @@ namespace taraxa { -TransactionQueue::TransactionQueue(std::shared_ptr final_chain, size_t max_size) +TransactionQueue::TransactionQueue(std::shared_ptr final_chain, size_t max_size) : known_txs_(max_size * 2, max_size / 5), kNonProposableTransactionsMaxSize(max_size * kNonProposableTransactionsLimitPercentage / 100), kMaxSize(max_size), @@ -135,7 +135,7 @@ TransactionStatus TransactionQueue::insert(std::shared_ptr &&transa assert(nonce_it->second->getHash() != tx_hash); // Replace transaction if gas price higher if (transaction->getGasPrice() > nonce_it->second->getGasPrice()) { - // Place same nonce transaction with lower gas price in non propsable 
transactions since it could be + // Place same nonce transaction with lower gas price in non proposable transactions since it could be // possible that some dag block might contain it non_proposable_transactions_[nonce_it->second->getHash()] = {last_block_number, nonce_it->second}; queue_transactions_.erase(nonce_it->second->getHash()); @@ -190,7 +190,7 @@ void TransactionQueue::blockFinalized(uint64_t block_number) { void TransactionQueue::purge() { for (auto account_it = account_nonce_transactions_.begin(); account_it != account_nonce_transactions_.end();) { - const auto account = final_chain_->get_account(account_it->first); + const auto account = final_chain_->getAccount(account_it->first); if (account.has_value()) { for (auto nonce_it = account_it->second.begin(); nonce_it != account_it->second.end();) { if (nonce_it->first < account->nonce) { diff --git a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp index 0ce0835c93..1a1e031f69 100644 --- a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp +++ b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp @@ -7,20 +7,17 @@ #include #include "network/network.hpp" -#include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" -#include "network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp" #include "pbft/pbft_manager.hpp" namespace taraxa { -VoteManager::VoteManager(const addr_t& node_addr, const PbftConfig& pbft_config, const secret_t& node_sk, - const vrf_wrapper::vrf_sk_t& vrf_sk, std::shared_ptr db, - std::shared_ptr pbft_chain, std::shared_ptr final_chain, +VoteManager::VoteManager(const FullNodeConfig& config, std::shared_ptr db, + std::shared_ptr pbft_chain, std::shared_ptr final_chain, std::shared_ptr key_manager, std::shared_ptr slashing_manager) - : kNodeAddr(node_addr), - kPbftConfig(pbft_config), - kVrfSk(vrf_sk), - kNodeSk(node_sk), + : 
kNodeAddr(dev::toAddress(config.node_secret)), + kPbftConfig(config.genesis.pbft), + kVrfSk(config.vrf_secret), + kNodeSk(config.node_secret), kNodePub(dev::toPublic(kNodeSk)), db_(std::move(db)), pbft_chain_(std::move(pbft_chain)), @@ -28,6 +25,7 @@ VoteManager::VoteManager(const addr_t& node_addr, const PbftConfig& pbft_config, key_manager_(std::move(key_manager)), slashing_manager_(std::move(slashing_manager)), already_validated_votes_(1000000, 1000) { + const auto& node_addr = kNodeAddr; LOG_OBJECTS_CREATE("VOTE_MGR"); auto addVerifiedVotes = [this](const std::vector>& votes, @@ -583,7 +581,7 @@ PbftPeriod VoteManager::getRewardVotesPbftBlockPeriod() { } void VoteManager::resetRewardVotes(PbftPeriod period, PbftRound round, PbftStep step, const blk_hash_t& block_hash, - DbStorage::Batch& batch) { + Batch& batch) { // Save 2t+1 cert votes to database, remove old reward votes { std::scoped_lock lock(reward_votes_info_mutex_); @@ -815,7 +813,7 @@ void VoteManager::saveOwnVerifiedVote(const std::shared_ptr& vote) { std::vector> VoteManager::getOwnVerifiedVotes() { return own_verified_votes_; } -void VoteManager::clearOwnVerifiedVotes(DbStorage::Batch& write_batch) { +void VoteManager::clearOwnVerifiedVotes(Batch& write_batch) { db_->clearOwnVerifiedVotes(write_batch, own_verified_votes_); own_verified_votes_.clear(); } @@ -840,19 +838,19 @@ std::shared_ptr VoteManager::generateVoteWithWeight(const taraxa::blk_ uint64_t pbft_sortition_threshold = 0; try { - voter_dpos_votes_count = final_chain_->dpos_eligible_vote_count(period - 1, kNodeAddr); + voter_dpos_votes_count = final_chain_->dposEligibleVoteCount(period - 1, kNodeAddr); if (!voter_dpos_votes_count) { // No delegation return nullptr; } - total_dpos_votes_count = final_chain_->dpos_eligible_total_vote_count(period - 1); + total_dpos_votes_count = final_chain_->dposEligibleTotalVoteCount(period - 1); pbft_sortition_threshold = getPbftSortitionThreshold(total_dpos_votes_count, vote_type); } catch 
(state_api::ErrFutureBlock& e) { LOG(log_er_) << "Unable to place vote for period: " << period << ", round: " << round << ", step: " << step << ", voted block hash: " << blockhash.abridged() << ". " - << "Period is too far ahead of actual finalized pbft chain size (" << final_chain_->last_block_number() + << "Period is too far ahead of actual finalized pbft chain size (" << final_chain_->lastBlockNumber() << "). Err msg: " << e.what(); return nullptr; @@ -881,10 +879,9 @@ std::pair VoteManager::validateVote(const std::shared_ptrgetPeriod(); try { - const uint64_t voter_dpos_votes_count = - final_chain_->dpos_eligible_vote_count(vote_period - 1, vote->getVoterAddr()); + const uint64_t voter_dpos_votes_count = final_chain_->dposEligibleVoteCount(vote_period - 1, vote->getVoterAddr()); - // Mark vote as validated only after getting dpos_eligible_vote_count and other values from dpos contract. It is + // Mark vote as validated only after getting dposEligibleVoteCount and other values from dpos contract. It is // possible that we are behind in processing pbft blocks, in which case we wont be able to get values from dpos // contract and validation fails due to this, not due to the fact that vote is invalid... already_validated_votes_.insert(vote->getHash()); @@ -910,7 +907,7 @@ std::pair VoteManager::validateVote(const std::shared_ptrdpos_eligible_total_vote_count(vote_period - 1); + const uint64_t total_dpos_votes_count = final_chain_->dposEligibleTotalVoteCount(vote_period - 1); const uint64_t pbft_sortition_threshold = getPbftSortitionThreshold(total_dpos_votes_count, vote->getType()); if (!vote->calculateWeight(voter_dpos_votes_count, total_dpos_votes_count, pbft_sortition_threshold)) { err_msg << "Invalid vote " << vote->getHash() << ": zero weight"; @@ -918,7 +915,7 @@ std::pair VoteManager::validateVote(const std::shared_ptrgetHash() << " against dpos contract. 
It's period (" << vote_period - << ") is too far ahead of actual finalized pbft chain size (" << final_chain_->last_block_number() + << ") is too far ahead of actual finalized pbft chain size (" << final_chain_->lastBlockNumber() << "). Err msg: " << e.what(); return {false, err_msg.str()}; } catch (...) { @@ -944,11 +941,11 @@ std::optional VoteManager::getPbftTwoTPlusOne(PbftPeriod pbft_period, uint64_t total_dpos_votes_count = 0; try { - total_dpos_votes_count = final_chain_->dpos_eligible_total_vote_count(pbft_period); + total_dpos_votes_count = final_chain_->dposEligibleTotalVoteCount(pbft_period); } catch (state_api::ErrFutureBlock& e) { LOG(log_er_) << "Unable to calculate 2t + 1 for period: " << pbft_period - << ". Period is too far ahead of actual finalized pbft chain size (" - << final_chain_->last_block_number() << "). Err msg: " << e.what(); + << ". Period is too far ahead of actual finalized pbft chain size (" << final_chain_->lastBlockNumber() + << "). Err msg: " << e.what(); return {}; } @@ -971,14 +968,14 @@ bool VoteManager::genAndValidateVrfSortition(PbftPeriod pbft_period, PbftRound p VrfPbftSortition vrf_sortition(kVrfSk, {PbftVoteTypes::propose_vote, pbft_period, pbft_round, 1}); try { - const uint64_t voter_dpos_votes_count = final_chain_->dpos_eligible_vote_count(pbft_period - 1, kNodeAddr); + const uint64_t voter_dpos_votes_count = final_chain_->dposEligibleVoteCount(pbft_period - 1, kNodeAddr); if (!voter_dpos_votes_count) { LOG(log_er_) << "Generated vrf sortition for period " << pbft_period << ", round " << pbft_round << " is invalid. 
Voter dpos vote count is zero"; return false; } - const uint64_t total_dpos_votes_count = final_chain_->dpos_eligible_total_vote_count(pbft_period - 1); + const uint64_t total_dpos_votes_count = final_chain_->dposEligibleTotalVoteCount(pbft_period - 1); const uint64_t pbft_sortition_threshold = getPbftSortitionThreshold(total_dpos_votes_count, PbftVoteTypes::propose_vote); @@ -989,9 +986,9 @@ bool VoteManager::genAndValidateVrfSortition(PbftPeriod pbft_period, PbftRound p return false; } } catch (state_api::ErrFutureBlock& e) { - LOG(log_er_) << "Unable to generate vrf sorititon for period " << pbft_period << ", round " << pbft_round - << ". Period is too far ahead of actual finalized pbft chain size (" - << final_chain_->last_block_number() << "). Err msg: " << e.what(); + LOG(log_er_) << "Unable to generate vrf sortition for period " << pbft_period << ", round " << pbft_round + << ". Period is too far ahead of actual finalized pbft chain size (" << final_chain_->lastBlockNumber() + << "). 
Err msg: " << e.what(); return false; } diff --git a/libraries/core_libs/network/graphql/src/account.cpp b/libraries/core_libs/network/graphql/src/account.cpp index cd413bfe31..ebaa93bfec 100644 --- a/libraries/core_libs/network/graphql/src/account.cpp +++ b/libraries/core_libs/network/graphql/src/account.cpp @@ -9,12 +9,12 @@ namespace graphql::taraxa { Account::Account(std::shared_ptr<::taraxa::final_chain::FinalChain> final_chain, dev::Address address, ::taraxa::EthBlockNumber blk_n) : kAddress(std::move(address)), final_chain_(std::move(final_chain)) { - account_ = final_chain_->get_account(kAddress, blk_n); + account_ = final_chain_->getAccount(kAddress, blk_n); } Account::Account(std::shared_ptr<::taraxa::final_chain::FinalChain> final_chain, dev::Address address) : kAddress(std::move(address)), final_chain_(std::move(final_chain)) { - account_ = final_chain_->get_account(kAddress); + account_ = final_chain_->getAccount(kAddress); } response::Value Account::getAddress() const noexcept { return response::Value(kAddress.toString()); } @@ -34,11 +34,11 @@ response::Value Account::getTransactionCount() const noexcept { } response::Value Account::getCode() const noexcept { - return response::Value(dev::toJS(final_chain_->get_code(kAddress, final_chain_->last_block_number()))); + return response::Value(dev::toJS(final_chain_->getCode(kAddress, final_chain_->lastBlockNumber()))); } response::Value Account::getStorage(response::Value&& slotArg) const { - return response::Value(dev::toJS(final_chain_->get_account_storage(kAddress, dev::u256(slotArg.get())))); + return response::Value(dev::toJS(final_chain_->getAccountStorage(kAddress, dev::u256(slotArg.get())))); } } // namespace graphql::taraxa \ No newline at end of file diff --git a/libraries/core_libs/network/graphql/src/block.cpp b/libraries/core_libs/network/graphql/src/block.cpp index 9158d37e1c..c8a4466202 100644 --- a/libraries/core_libs/network/graphql/src/block.cpp +++ 
b/libraries/core_libs/network/graphql/src/block.cpp @@ -77,7 +77,7 @@ response::Value Block::getTimestamp() const noexcept { response::Value Block::getLogsBloom() const noexcept { return response::Value(block_header_->log_bloom.toString()); } -response::Value Block::getMixHash() const noexcept { return response::Value(block_header_->mix_hash().toString()); } +response::Value Block::getMixHash() const noexcept { return response::Value(block_header_->mixHash().toString()); } response::Value Block::getDifficulty() const noexcept { return response::Value(block_header_->difficulty().str()); } @@ -91,9 +91,7 @@ std::optional>> Block::getOmmers() co std::shared_ptr Block::getOmmerAt(int&&) const noexcept { return nullptr; } -response::Value Block::getOmmerHash() const noexcept { - return response::Value(block_header_->uncles_hash().toString()); -} +response::Value Block::getOmmerHash() const noexcept { return response::Value(block_header_->unclesHash().toString()); } std::optional>> Block::getTransactions() const noexcept { std::vector> ret; diff --git a/libraries/core_libs/network/graphql/src/query.cpp b/libraries/core_libs/network/graphql/src/query.cpp index e8705353e4..2a14b9ed8a 100644 --- a/libraries/core_libs/network/graphql/src/query.cpp +++ b/libraries/core_libs/network/graphql/src/query.cpp @@ -37,17 +37,17 @@ std::shared_ptr Query::getBlock(std::optional&& std::optional<::taraxa::EthBlockNumber> block_number; if (number) { block_number = number->get(); - if (const auto last_block_number = final_chain_->last_block_number(); last_block_number < block_number) { + if (const auto last_block_number = final_chain_->lastBlockNumber(); last_block_number < block_number) { return nullptr; } } if (hash) { - block_number = final_chain_->block_number(dev::h256(hash->get())); + block_number = final_chain_->blockNumber(dev::h256(hash->get())); if (!block_number) { return nullptr; } } - auto block_header = final_chain_->block_header(block_number); + auto block_header = 
final_chain_->blockHeader(block_number); if (!block_header) { return nullptr; } @@ -85,7 +85,7 @@ std::vector> Query::getBlocks(response::Value&& f end_block_num = start_block_num + Query::kMaxPropagationLimit; } - const int last_block_number = final_chain_->last_block_number(); + const int last_block_number = final_chain_->lastBlockNumber(); if (start_block_num > last_block_number) { return blocks; } else if (end_block_num > last_block_number) { @@ -156,7 +156,7 @@ std::vector> Query::getPeriodDagBlocks( if (periodArg) { period = periodArg->get(); } else { - period = final_chain_->last_block_number(); + period = final_chain_->lastBlockNumber(); } auto dag_blocks = db_->getFinalizedDagBlockByPeriod(period); if (dag_blocks.size()) { diff --git a/libraries/core_libs/network/graphql/src/sync_state.cpp b/libraries/core_libs/network/graphql/src/sync_state.cpp index 4a41e83dad..af3b7b5800 100644 --- a/libraries/core_libs/network/graphql/src/sync_state.cpp +++ b/libraries/core_libs/network/graphql/src/sync_state.cpp @@ -12,7 +12,7 @@ SyncState::SyncState(std::shared_ptr<::taraxa::final_chain::FinalChain> final_ch response::Value SyncState::getStartingBlock() const noexcept { return response::Value(0); } response::Value SyncState::getCurrentBlock() const noexcept { - return response::Value(static_cast(final_chain_->last_block_number())); + return response::Value(static_cast(final_chain_->lastBlockNumber())); } response::Value SyncState::getHighestBlock() const noexcept { diff --git a/libraries/core_libs/network/graphql/src/transaction.cpp b/libraries/core_libs/network/graphql/src/transaction.cpp index d2dd870980..8ba1353daf 100644 --- a/libraries/core_libs/network/graphql/src/transaction.cpp +++ b/libraries/core_libs/network/graphql/src/transaction.cpp @@ -26,7 +26,7 @@ response::Value Transaction::getNonce() const noexcept { return response::Value( std::optional Transaction::getIndex() const noexcept { if (!location_) { - location_ = 
final_chain_->transaction_location(transaction_->getHash()); + location_ = final_chain_->transactionLocation(transaction_->getHash()); if (!location_) return std::nullopt; } return {location_->position}; @@ -34,7 +34,7 @@ std::optional Transaction::getIndex() const noexcept { std::shared_ptr Transaction::getFrom(std::optional&&) const { if (!location_) { - location_ = final_chain_->transaction_location(transaction_->getHash()); + location_ = final_chain_->transactionLocation(transaction_->getHash()); if (!location_) { return std::make_shared(std::make_shared(final_chain_, transaction_->getSender())); } @@ -46,7 +46,7 @@ std::shared_ptr Transaction::getFrom(std::optional Transaction::getTo(std::optional&&) const { if (!transaction_->getReceiver()) return nullptr; if (!location_) { - location_ = final_chain_->transaction_location(transaction_->getHash()); + location_ = final_chain_->transactionLocation(transaction_->getHash()); if (!location_) { return std::make_shared(std::make_shared(final_chain_, *transaction_->getReceiver())); } @@ -69,7 +69,7 @@ response::Value Transaction::getInputData() const noexcept { std::shared_ptr Transaction::getBlock() const { if (!location_) { - location_ = final_chain_->transaction_location(transaction_->getHash()); + location_ = final_chain_->transactionLocation(transaction_->getHash()); if (!location_) return nullptr; } return get_block_by_num_(location_->period); @@ -77,7 +77,7 @@ std::shared_ptr Transaction::getBlock() const { std::optional Transaction::getStatus() const noexcept { if (!receipt_) { - receipt_ = final_chain_->transaction_receipt(transaction_->getHash()); + receipt_ = final_chain_->transactionReceipt(transaction_->getHash()); if (!receipt_) return std::nullopt; } return response::Value(static_cast(receipt_->status_code)); @@ -85,7 +85,7 @@ std::optional Transaction::getStatus() const noexcept { std::optional Transaction::getGasUsed() const noexcept { if (!receipt_) { - receipt_ = 
final_chain_->transaction_receipt(transaction_->getHash()); + receipt_ = final_chain_->transactionReceipt(transaction_->getHash()); if (!receipt_) return std::nullopt; } return response::Value(static_cast(receipt_->gas_used)); @@ -93,7 +93,7 @@ std::optional Transaction::getGasUsed() const noexcept { std::optional Transaction::getCumulativeGasUsed() const noexcept { if (!receipt_) { - receipt_ = final_chain_->transaction_receipt(transaction_->getHash()); + receipt_ = final_chain_->transactionReceipt(transaction_->getHash()); if (!receipt_) return std::nullopt; } return response::Value(static_cast(receipt_->cumulative_gas_used)); @@ -101,7 +101,7 @@ std::optional Transaction::getCumulativeGasUsed() const noexcep std::shared_ptr Transaction::getCreatedContract(std::optional&&) const noexcept { if (!receipt_) { - receipt_ = final_chain_->transaction_receipt(transaction_->getHash()); + receipt_ = final_chain_->transactionReceipt(transaction_->getHash()); if (!receipt_) return nullptr; } if (!receipt_->new_contract_address) return nullptr; @@ -111,7 +111,7 @@ std::shared_ptr Transaction::getCreatedContract(std::optional>> Transaction::getLogs() const noexcept { std::vector> logs; if (!receipt_) { - receipt_ = final_chain_->transaction_receipt(transaction_->getHash()); + receipt_ = final_chain_->transactionReceipt(transaction_->getHash()); if (!receipt_) return std::nullopt; } diff --git a/libraries/core_libs/network/graphql/src/types/current_state.cpp b/libraries/core_libs/network/graphql/src/types/current_state.cpp index f7dc510266..7432978312 100644 --- a/libraries/core_libs/network/graphql/src/types/current_state.cpp +++ b/libraries/core_libs/network/graphql/src/types/current_state.cpp @@ -7,7 +7,7 @@ CurrentState::CurrentState(std::shared_ptr<::taraxa::final_chain::FinalChain> fi : final_chain_(std::move(final_chain)), dag_manager_(std::move(dag_manager)) {} response::Value CurrentState::getFinalBlock() const noexcept { - return 
response::Value(static_cast(final_chain_->last_block_number())); + return response::Value(static_cast(final_chain_->lastBlockNumber())); } response::Value CurrentState::getDagBlockLevel() const noexcept { diff --git a/libraries/core_libs/network/include/network/http_server.hpp b/libraries/core_libs/network/include/network/http_server.hpp index 615f0cb954..1641c1a051 100644 --- a/libraries/core_libs/network/include/network/http_server.hpp +++ b/libraries/core_libs/network/include/network/http_server.hpp @@ -4,6 +4,7 @@ #include #include +#include "common/thread_pool.hpp" #include "common/types.hpp" #include "logger/logger.hpp" @@ -22,8 +23,8 @@ class HttpHandler; class HttpServer : public std::enable_shared_from_this { public: - HttpServer(boost::asio::io_context& io, boost::asio::ip::tcp::endpoint ep, const addr_t& node_addr, - const std::shared_ptr& request_processor); + HttpServer(std::shared_ptr thread_pool, boost::asio::ip::tcp::endpoint ep, const addr_t& node_addr, + const std::shared_ptr& request_processor, uint32_t max_pending_tasks); virtual ~HttpServer() { HttpServer::stop(); } @@ -31,6 +32,8 @@ class HttpServer : public std::enable_shared_from_this { bool stop(); void accept(); + uint32_t numberOfPendingTasks() const; + bool pendingTasksOverLimit() const { return numberOfPendingTasks() > kMaxPendingTasks; } boost::asio::io_context& getIoContext() { return io_context_; } std::shared_ptr getShared(); std::shared_ptr createConnection(); @@ -45,6 +48,8 @@ class HttpServer : public std::enable_shared_from_this { boost::asio::io_context& io_context_; boost::asio::ip::tcp::acceptor acceptor_; boost::asio::ip::tcp::endpoint ep_; + std::weak_ptr thread_pool_; + const uint32_t kMaxPendingTasks; LOG_OBJECTS_DEFINE }; // QQ: diff --git a/libraries/core_libs/network/include/network/network.hpp b/libraries/core_libs/network/include/network/network.hpp index 8c654eaaa7..9cce7ec551 100644 --- a/libraries/core_libs/network/include/network/network.hpp +++ 
b/libraries/core_libs/network/include/network/network.hpp @@ -7,15 +7,9 @@ #include #include -#include #include -#include -#include -#include -#include #include "common/thread_pool.hpp" -#include "common/util.hpp" #include "config/config.hpp" #include "network/tarcap/taraxa_capability.hpp" #include "network/tarcap/tarcap_version.hpp" @@ -61,7 +55,7 @@ class Network { uint64_t syncTimeSeconds() const; void setSyncStatePeriod(PbftPeriod period); - void gossipDagBlock(const DagBlock &block, bool proposed, const SharedTransactions &trxs); + void gossipDagBlock(const std::shared_ptr &block, bool proposed, const SharedTransactions &trxs); void gossipVote(const std::shared_ptr &vote, const std::shared_ptr &block, bool rebroadcast = false); void gossipVotesBundle(const std::vector> &votes, bool rebroadcast = false); @@ -77,6 +71,13 @@ class Network { */ void requestPillarBlockVotesBundle(PbftPeriod period, const blk_hash_t &pillar_block_hash); + /** + * @brief Get packets queue status + * + * @return true if packets queue is over the limit + */ + bool packetQueueOverLimit() const; + // METHODS USED IN TESTS ONLY template std::shared_ptr getSpecificHandler() const; diff --git a/libraries/core_libs/network/include/network/tarcap/packet_types.hpp b/libraries/core_libs/network/include/network/tarcap/packet_types.hpp index 6cd9baa778..94e1a14a97 100644 --- a/libraries/core_libs/network/include/network/tarcap/packet_types.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packet_types.hpp @@ -11,29 +11,29 @@ namespace taraxa::network { */ enum SubprotocolPacketType : uint32_t { // Consensus packets with high processing priority - HighPriorityPackets = 0, - VotePacket, // Vote packer can contain (optional) also pbft block - GetNextVotesSyncPacket, - VotesBundlePacket, + kHighPriorityPackets = 0, + kVotePacket, // Vote packer can contain (optional) also pbft block + kGetNextVotesSyncPacket, + kVotesBundlePacket, // Standard packets with mid processing priority - 
MidPriorityPackets, - DagBlockPacket, + kMidPriorityPackets, + kDagBlockPacket, // DagSyncPacket has mid priority as it is also used for ad-hoc syncing in case new dag blocks miss tips/pivot - DagSyncPacket, - TransactionPacket, + kDagSyncPacket, + kTransactionPacket, // Non critical packets with low processing priority - LowPriorityPackets, - StatusPacket, - GetPbftSyncPacket, - PbftSyncPacket, - GetDagSyncPacket, - PillarVotePacket, - GetPillarVotesBundlePacket, - PillarVotesBundlePacket, + kLowPriorityPackets, + kStatusPacket, + kGetPbftSyncPacket, + kPbftSyncPacket, + kGetDagSyncPacket, + kPillarVotePacket, + kGetPillarVotesBundlePacket, + kPillarVotesBundlePacket, - PacketCount + kPacketCount }; /** @@ -42,31 +42,31 @@ enum SubprotocolPacketType : uint32_t { */ inline std::string convertPacketTypeToString(SubprotocolPacketType packet_type) { switch (packet_type) { - case StatusPacket: + case kStatusPacket: return "StatusPacket"; - case DagBlockPacket: + case kDagBlockPacket: return "DagBlockPacket"; - case GetDagSyncPacket: + case kGetDagSyncPacket: return "GetDagSyncPacket"; - case DagSyncPacket: + case kDagSyncPacket: return "DagSyncPacket"; - case TransactionPacket: + case kTransactionPacket: return "TransactionPacket"; - case VotePacket: + case kVotePacket: return "VotePacket"; - case GetNextVotesSyncPacket: + case kGetNextVotesSyncPacket: return "GetNextVotesSyncPacket"; - case VotesBundlePacket: + case kVotesBundlePacket: return "VotesBundlePacket"; - case GetPbftSyncPacket: + case kGetPbftSyncPacket: return "GetPbftSyncPacket"; - case PbftSyncPacket: + case kPbftSyncPacket: return "PbftSyncPacket"; - case PillarVotePacket: + case kPillarVotePacket: return "PillarVotePacket"; - case GetPillarVotesBundlePacket: + case kGetPillarVotesBundlePacket: return "GetPillarVotesBundlePacket"; - case PillarVotesBundlePacket: + case kPillarVotesBundlePacket: return "PillarVotesBundlePacket"; default: break; diff --git 
a/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_block_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_block_packet.hpp new file mode 100644 index 0000000000..092bc55054 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_block_packet.hpp @@ -0,0 +1,15 @@ +#pragma once + +#include "dag/dag_block.hpp" +#include "transaction/system_transaction.hpp" + +namespace taraxa::network::tarcap { + +struct DagBlockPacket { + std::vector> transactions; + std::shared_ptr dag_block; + + RLP_FIELDS_DEFINE_INPLACE(transactions, dag_block) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_sync_packet.hpp new file mode 100644 index 0000000000..0e5c352add --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_sync_packet.hpp @@ -0,0 +1,17 @@ +#pragma once + +#include "dag/dag_block.hpp" +#include "transaction/system_transaction.hpp" + +namespace taraxa::network::tarcap { + +struct DagSyncPacket { + PbftPeriod request_period; + PbftPeriod response_period; + std::vector> transactions; + std::vector> dag_blocks; + + RLP_FIELDS_DEFINE_INPLACE(request_period, response_period, transactions, dag_blocks) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/get_dag_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_dag_sync_packet.hpp new file mode 100644 index 0000000000..5c96debe2d --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_dag_sync_packet.hpp @@ -0,0 +1,15 @@ +#pragma once + +#include "dag/dag_block.hpp" +#include "transaction/system_transaction.hpp" + +namespace taraxa::network::tarcap { + +struct GetDagSyncPacket { + PbftPeriod peer_period; + 
std::vector blocks_hashes; + + RLP_FIELDS_DEFINE_INPLACE(peer_period, blocks_hashes) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/get_next_votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_next_votes_bundle_packet.hpp new file mode 100644 index 0000000000..08c584b249 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_next_votes_bundle_packet.hpp @@ -0,0 +1,14 @@ +#pragma once + +#include "common/encoding_rlp.hpp" + +namespace taraxa::network::tarcap { + +struct GetNextVotesBundlePacket { + PbftPeriod peer_pbft_period; + PbftRound peer_pbft_round; + + RLP_FIELDS_DEFINE_INPLACE(peer_pbft_period, peer_pbft_round) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pbft_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pbft_sync_packet.hpp new file mode 100644 index 0000000000..b0429f0882 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pbft_sync_packet.hpp @@ -0,0 +1,13 @@ +#pragma once + +#include "common/encoding_rlp.hpp" + +namespace taraxa::network::tarcap { + +struct GetPbftSyncPacket { + size_t height_to_sync; + + RLP_FIELDS_DEFINE_INPLACE(height_to_sync) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pillar_votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pillar_votes_bundle_packet.hpp new file mode 100644 index 0000000000..fe25c5469b --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pillar_votes_bundle_packet.hpp @@ -0,0 +1,14 @@ +#pragma once + +#include "common/encoding_rlp.hpp" + +namespace taraxa::network::tarcap { + +struct GetPillarVotesBundlePacket { + PbftPeriod period; + 
blk_hash_t pillar_block_hash; + + RLP_FIELDS_DEFINE_INPLACE(period, pillar_block_hash) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/pbft_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/pbft_sync_packet.hpp new file mode 100644 index 0000000000..b3f5fd4a12 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/pbft_sync_packet.hpp @@ -0,0 +1,17 @@ +#pragma once + +#include "pbft/period_data.hpp" +#include "vote/pbft_vote.hpp" +#include "vote/votes_bundle_rlp.hpp" + +namespace taraxa::network::tarcap { + +struct PbftSyncPacket { + bool last_block; + PeriodData period_data; + std::optional current_block_cert_votes_bundle; + + RLP_FIELDS_DEFINE_INPLACE(last_block, period_data, current_block_cert_votes_bundle) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_vote_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_vote_packet.hpp new file mode 100644 index 0000000000..e5bac3c4d9 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_vote_packet.hpp @@ -0,0 +1,15 @@ +#pragma once + +#include "common/encoding_rlp.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" +#include "vote/pillar_vote.hpp" + +namespace taraxa::network::tarcap { + +struct PillarVotePacket { + std::shared_ptr pillar_vote; + + RLP_FIELDS_DEFINE_INPLACE(pillar_vote) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp new file mode 100644 index 0000000000..eb9e4061bf --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp 
@@ -0,0 +1,16 @@ +#pragma once + +#include "common/encoding_rlp.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" +#include "vote/pillar_vote.hpp" +#include "vote/votes_bundle_rlp.hpp" + +namespace taraxa::network::tarcap { + +struct PillarVotesBundlePacket { + OptimizedPillarVotesBundle pillar_votes_bundle; + + RLP_FIELDS_DEFINE_INPLACE(pillar_votes_bundle) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/status_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/status_packet.hpp new file mode 100644 index 0000000000..314becb769 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/status_packet.hpp @@ -0,0 +1,28 @@ +#pragma once + +namespace taraxa::network::tarcap { + +struct StatusPacket { + struct InitialData { + uint64_t peer_chain_id; + blk_hash_t genesis_hash; + unsigned node_major_version; + unsigned node_minor_version; + unsigned node_patch_version; + bool is_light_node; + PbftPeriod node_history; + + RLP_FIELDS_DEFINE_INPLACE(peer_chain_id, genesis_hash, node_major_version, node_minor_version, node_patch_version, + is_light_node, node_history) + }; + + PbftPeriod peer_pbft_chain_size; + PbftRound peer_pbft_round; + uint64_t peer_dag_level; + bool peer_syncing; + std::optional initial_data; + + RLP_FIELDS_DEFINE_INPLACE(peer_pbft_chain_size, peer_pbft_round, peer_dag_level, peer_syncing, initial_data) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp new file mode 100644 index 0000000000..3ef4d4f3fa --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp @@ -0,0 +1,14 @@ +#pragma once + +#include "transaction/transaction.hpp" + +namespace 
taraxa::network::tarcap { + +struct TransactionPacket { + std::vector> transactions; + std::vector extra_transactions_hashes; + + RLP_FIELDS_DEFINE_INPLACE(transactions, extra_transactions_hashes) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp new file mode 100644 index 0000000000..d8a7c900a0 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp @@ -0,0 +1,22 @@ +#pragma once + +#include "pbft/pbft_block.hpp" +#include "vote/pbft_vote.hpp" + +namespace taraxa::network::tarcap { + +struct VotePacket { + struct OptionalData { + std::shared_ptr pbft_block; + uint64_t peer_chain_size; + + RLP_FIELDS_DEFINE_INPLACE(pbft_block, peer_chain_size) + }; + + std::shared_ptr vote; + std::optional optional_data; + + RLP_FIELDS_DEFINE_INPLACE(vote, optional_data) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp new file mode 100644 index 0000000000..064b84634d --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp @@ -0,0 +1,15 @@ +#pragma once + +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" +#include "vote/pbft_vote.hpp" +#include "vote/votes_bundle_rlp.hpp" + +namespace taraxa::network::tarcap { + +struct VotesBundlePacket { + OptimizedPbftVotesBundle votes_bundle; + + RLP_FIELDS_DEFINE_INPLACE(votes_bundle) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handler.hpp index 2e1f01cf67..8839a7c615 100644 --- 
a/libraries/core_libs/network/include/network/tarcap/packets_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handler.hpp @@ -3,7 +3,7 @@ #include #include -#include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" +#include "network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp" namespace taraxa::network::tarcap { @@ -18,9 +18,9 @@ class PacketsHandler { * control the lifetime of the object or not. * * @param packet_type - * @return reference to std::shared_ptr + * @return reference to std::shared_ptr */ - const std::shared_ptr& getSpecificHandler(SubprotocolPacketType packet_type) const; + const std::shared_ptr& getSpecificHandler(SubprotocolPacketType packet_type) const; /** * @brief templated getSpecificHandler method for getting specific packet handler based on @@ -44,7 +44,7 @@ class PacketsHandler { private: // Map of all packets handlers, factory method selects specific packet handler for processing based on packet type - std::unordered_map> packets_handlers_; + std::unordered_map> packets_handlers_; }; template diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp new file mode 100644 index 0000000000..37d438345d --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp @@ -0,0 +1,31 @@ +#pragma once + +#include "network/threadpool/packet_data.hpp" + +namespace taraxa::network::tarcap { + +// Taraxa capability name +constexpr char TARAXA_CAPABILITY_NAME[] = "taraxa"; + +/** + * @brief Base Packet handler base class that consists processPacket function + */ +class BasePacketHandler { + public: + BasePacketHandler() = default; + virtual ~BasePacketHandler() = default; + BasePacketHandler(const BasePacketHandler&) = default; + 
BasePacketHandler(BasePacketHandler&&) = default; + BasePacketHandler& operator=(const BasePacketHandler&) = default; + BasePacketHandler& operator=(BasePacketHandler&&) = default; + + /** + * @brief Packet processing function wrapper + * + * @param packet_data + */ + // TODO: use unique_ptr for packet data for easier & quicker copying + virtual void processPacket(const threadpool::PacketData& packet_data) = 0; +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp index 91bc38fd27..ab87d68089 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp @@ -1,32 +1,42 @@ #pragma once #include "packet_handler.hpp" - -namespace taraxa { -class KeyManager; -class PillarVote; - -namespace pillar_chain { -class PillarChainManager; -} // namespace pillar_chain - -namespace final_chain { -class FinalChain; -} - -} // namespace taraxa +#include "pillar_chain/pillar_chain_manager.hpp" namespace taraxa::network::tarcap { -class ExtPillarVotePacketHandler : public PacketHandler { +template +class ExtPillarVotePacketHandler : public PacketHandler { public: ExtPillarVotePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pillar_chain_manager, - const addr_t& node_addr, const std::string& log_channel); + const addr_t& node_addr, const std::string& log_channel) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel), + pillar_chain_manager_{std::move(pillar_chain_manager)} {} protected: - bool processPillarVote(const std::shared_ptr& vote, const 
std::shared_ptr& peer); + bool processPillarVote(const std::shared_ptr& vote, const std::shared_ptr& peer) { + if (!pillar_chain_manager_->isRelevantPillarVote(vote)) { + LOG(this->log_dg_) << "Drop irrelevant pillar vote " << vote->getHash() << ", period " << vote->getPeriod() + << " from peer " << peer->getId(); + return false; + } + + if (!pillar_chain_manager_->validatePillarVote(vote)) { + // TODO: enable for mainnet + // std::ostringstream err_msg; + // err_msg << "Invalid pillar vote " << vote->getHash() << " from peer " << peer->getId(); + // throw MaliciousPeerException(err_msg.str()); + return false; + } + + pillar_chain_manager_->addVerifiedPillarVote(vote); + + // Mark pillar vote as known for peer + peer->markPillarVoteAsKnown(vote->getHash()); + return true; + } protected: std::shared_ptr pillar_chain_manager_; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp index 16b9058076..ee53a26841 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp @@ -1,31 +1,33 @@ #pragma once -#include "dag/dag_block.hpp" #include "dag/dag_manager.hpp" +#include "network/tarcap/packets/latest/get_dag_sync_packet.hpp" +#include "network/tarcap/packets/latest/get_pbft_sync_packet.hpp" +#include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "packet_handler.hpp" - -namespace taraxa { -class PbftChain; -class PbftManager; -class DagManager; -class DbStorage; -} // namespace taraxa +#include "pbft/pbft_chain.hpp" +#include "pbft/pbft_manager.hpp" namespace taraxa::network::tarcap { -class PbftSyncingState; - /** * @brief ExtSyncingPacketHandler is extended abstract PacketHandler with 
added functions that are used in packet * handlers that need to interact with syncing process in some way */ -class ExtSyncingPacketHandler : public PacketHandler { +template +class ExtSyncingPacketHandler : public PacketHandler { public: ExtSyncingPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, - std::shared_ptr db, const addr_t &node_addr, const std::string &log_channel_name); + std::shared_ptr db, const addr_t &node_addr, const std::string &log_channel_name) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel_name), + pbft_syncing_state_(std::move(pbft_syncing_state)), + pbft_chain_(std::move(pbft_chain)), + pbft_mgr_(std::move(pbft_mgr)), + dag_mgr_(std::move(dag_mgr)), + db_(std::move(db)) {} virtual ~ExtSyncingPacketHandler() = default; ExtSyncingPacketHandler &operator=(const ExtSyncingPacketHandler &) = delete; @@ -35,7 +37,44 @@ class ExtSyncingPacketHandler : public PacketHandler { * @brief Start syncing pbft if needed * */ - void startSyncingPbft(); + void startSyncingPbft() { + if (pbft_syncing_state_->isPbftSyncing()) { + LOG(this->log_dg_) << "startSyncingPbft called but syncing_ already true"; + return; + } + + std::shared_ptr peer = getMaxChainPeer(); + if (!peer) { + LOG(this->log_nf_) << "Restarting syncing PBFT not possible since no connected peers"; + return; + } + + auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); + if (peer->pbft_chain_size_ > pbft_sync_period) { + auto peer_id = peer->getId().abridged(); + auto peer_pbft_chain_size = peer->pbft_chain_size_.load(); + if (!pbft_syncing_state_->setPbftSyncing(true, pbft_sync_period, std::move(peer))) { + LOG(this->log_dg_) << "startSyncingPbft called but syncing_ already true"; + return; + } + LOG(this->log_si_) << "Restarting syncing PBFT from peer " << peer_id << ", peer PBFT 
chain size " + << peer_pbft_chain_size << ", own PBFT chain synced at period " << pbft_sync_period; + + if (syncPeerPbft(pbft_sync_period + 1)) { + // Disable snapshots only if are syncing from scratch + if (pbft_syncing_state_->isDeepPbftSyncing()) { + db_->disableSnapshots(); + } + } else { + pbft_syncing_state_->setPbftSyncing(false); + } + } else { + LOG(this->log_nf_) << "Restarting syncing PBFT not needed since our pbft chain size: " << pbft_sync_period << "(" + << pbft_chain_->getPbftChainSize() << ")" + << " is greater or equal than max node pbft chain size:" << peer->pbft_chain_size_; + db_->enableSnapshots(); + } + } /** * @brief Send sync request to the current syncing peer with specified request_period @@ -44,14 +83,108 @@ class ExtSyncingPacketHandler : public PacketHandler { * * @return true if sync request was sent, otherwise false */ - bool syncPeerPbft(PbftPeriod request_period); + bool syncPeerPbft(PbftPeriod request_period) { + const auto syncing_peer = pbft_syncing_state_->syncingPeer(); + if (!syncing_peer) { + LOG(this->log_er_) << "Unable to send GetPbftSyncPacket. No syncing peer set."; + return false; + } + + if (request_period > syncing_peer->pbft_chain_size_) { + LOG(this->log_wr_) << "Invalid syncPeerPbft argument. 
Node " << syncing_peer->getId() << " chain size " + << syncing_peer->pbft_chain_size_ << ", requested period " << request_period; + return false; + } + + LOG(this->log_nf_) << "Send GetPbftSyncPacket with period " << request_period << " to node " + << syncing_peer->getId(); + return this->sealAndSend(syncing_peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, + encodePacketRlp(GetPbftSyncPacket{request_period})); + } - void requestDagBlocks(const dev::p2p::NodeID &_nodeID, const std::unordered_set &blocks, - PbftPeriod period); - void requestPendingDagBlocks(std::shared_ptr peer = nullptr); + void requestDagBlocks(const dev::p2p::NodeID &_nodeID, std::vector &&blocks, PbftPeriod period) { + this->sealAndSend(_nodeID, SubprotocolPacketType::kGetDagSyncPacket, + encodePacketRlp(GetDagSyncPacket{period, std::move(blocks)})); + } + + void requestPendingDagBlocks(std::shared_ptr peer = nullptr) { + if (!peer) { + peer = getMaxChainPeer([](const std::shared_ptr &peer) { + if (peer->peer_dag_synced_ || !peer->dagSyncingAllowed()) { + return false; + } + return true; + }); + if (!peer) { + LOG(this->log_nf_) << "requestPendingDagBlocks not possible since no peers are matching conditions"; + return; + } + } + + if (!peer) { + LOG(this->log_nf_) << "requestPendingDagBlocks not possible since no connected peers"; + return; + } + + // This prevents ddos requesting dag blocks. We can only request this one time from one peer. 
+ if (peer->peer_dag_synced_) { + LOG(this->log_nf_) << "requestPendingDagBlocks not possible since already requested for peer"; + return; + } + + // Only request dag blocks if periods are matching + auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); + if (pbft_sync_period == peer->pbft_chain_size_) { + // This prevents parallel requests + if (bool b = false; !peer->peer_dag_syncing_.compare_exchange_strong(b, !b)) { + LOG(this->log_nf_) << "requestPendingDagBlocks not possible since already requesting for peer"; + return; + } + LOG(this->log_nf_) << "Request pending blocks from peer " << peer->getId(); + std::vector known_non_finalized_blocks; + auto [period, blocks] = dag_mgr_->getNonFinalizedBlocks(); + for (auto &level_blocks : blocks) { + for (auto &block : level_blocks.second) { + known_non_finalized_blocks.emplace_back(block); + } + } + + requestDagBlocks(peer->getId(), std::move(known_non_finalized_blocks), period); + } + } std::shared_ptr getMaxChainPeer(std::function &)> filter_func = - [](const std::shared_ptr &) { return true; }); + [](const std::shared_ptr &) { return true; }) { + std::shared_ptr max_pbft_chain_peer; + PbftPeriod max_pbft_chain_size = 0; + uint64_t max_node_dag_level = 0; + + // Find peer with max pbft chain and dag level + for (auto const &peer : this->peers_state_->getAllPeers()) { + // Apply the filter function + if (!filter_func(peer.second)) { + continue; + } + + if (peer.second->pbft_chain_size_ > max_pbft_chain_size) { + if (peer.second->peer_light_node && + pbft_mgr_->pbftSyncingPeriod() + peer.second->peer_light_node_history < peer.second->pbft_chain_size_) { + LOG(this->log_er_) << "Disconnecting from light node peer " << peer.first + << " History: " << peer.second->peer_light_node_history + << " chain size: " << peer.second->pbft_chain_size_; + this->disconnect(peer.first, dev::p2p::UserReason); + continue; + } + max_pbft_chain_size = peer.second->pbft_chain_size_; + max_node_dag_level = peer.second->dag_level_; + 
max_pbft_chain_peer = peer.second; + } else if (peer.second->pbft_chain_size_ == max_pbft_chain_size && peer.second->dag_level_ > max_node_dag_level) { + max_node_dag_level = peer.second->dag_level_; + max_pbft_chain_peer = peer.second; + } + } + return max_pbft_chain_peer; + } protected: std::shared_ptr pbft_syncing_state_{nullptr}; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp index 62dc18f0cc..7a9e662b38 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp @@ -1,15 +1,13 @@ #pragma once +#include "network/tarcap/packets/latest/get_pbft_sync_packet.hpp" +#include "network/tarcap/packets/latest/votes_bundle_packet.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "packet_handler.hpp" - -namespace taraxa { -class PbftVote; -class PbftManager; -class PbftChain; -class PbftBlock; -class VoteManager; -class SlashingManager; -} // namespace taraxa +#include "pbft/pbft_manager.hpp" +#include "vote/pbft_vote.hpp" +#include "vote/votes_bundle_rlp.hpp" +#include "vote_manager/vote_manager.hpp" namespace taraxa::network::tarcap { @@ -17,13 +15,21 @@ namespace taraxa::network::tarcap { * @brief ExtVotesPacketHandler is extended abstract PacketHandler with added functions that are used in packet * handlers that process pbft votes */ -class ExtVotesPacketHandler : public PacketHandler { +template +class ExtVotesPacketHandler : public PacketHandler { public: ExtVotesPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, std::shared_ptr 
slashing_manager, const addr_t& node_addr, - const std::string& log_channel_name); + const std::string& log_channel_name) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel_name), + last_votes_sync_request_time_(std::chrono::system_clock::now()), + last_pbft_block_sync_request_time_(std::chrono::system_clock::now()), + pbft_mgr_(std::move(pbft_mgr)), + pbft_chain_(std::move(pbft_chain)), + vote_mgr_(std::move(vote_mgr)), + slashing_manager_(std::move(slashing_manager)) {} virtual ~ExtVotesPacketHandler() = default; ExtVotesPacketHandler(const ExtVotesPacketHandler&) = delete; @@ -41,17 +47,110 @@ class ExtVotesPacketHandler : public PacketHandler { * @return if vote was successfully processed, otherwise false */ bool processVote(const std::shared_ptr& vote, const std::shared_ptr& pbft_block, - const std::shared_ptr& peer, bool validate_max_round_step); + const std::shared_ptr& peer, bool validate_max_round_step) { + if (pbft_block && !validateVoteAndBlock(vote, pbft_block)) { + throw MaliciousPeerException("Received vote's voted value != received pbft block"); + } + + if (vote_mgr_->voteInVerifiedMap(vote)) { + LOG(this->log_dg_) << "Vote " << vote->getHash() << " already inserted in verified queue"; + return false; + } + + // Validate vote's period, round and step min/max values + if (const auto vote_valid = validateVotePeriodRoundStep(vote, peer, validate_max_round_step); !vote_valid.first) { + LOG(this->log_wr_) << "Vote period/round/step " << vote->getHash() + << " validation failed. 
Err: " << vote_valid.second; + return false; + } + + // Check is vote is unique per period, round & step & voter -> each address can generate just 1 vote + // (for a value that isn't NBH) per period, round & step + if (auto vote_valid = vote_mgr_->isUniqueVote(vote); !vote_valid.first) { + // Create double voting proof + slashing_manager_->submitDoubleVotingProof(vote, vote_valid.second); + throw MaliciousPeerException("Received double vote", vote->getVoter()); + } + + // Validate vote's signature, vrf, etc... + if (const auto vote_valid = vote_mgr_->validateVote(vote); !vote_valid.first) { + LOG(this->log_wr_) << "Vote " << vote->getHash() << " validation failed. Err: " << vote_valid.second; + return false; + } + + if (!vote_mgr_->addVerifiedVote(vote)) { + LOG(this->log_dg_) << "Vote " << vote->getHash() << " already inserted in verified queue(race condition)"; + return false; + } + + if (pbft_block) { + pbft_mgr_->processProposedBlock(pbft_block, vote); + } + + return true; + } /** * @brief Checks is vote is relevant for current pbft state in terms of period, round and type * @param vote * @return true if vote is relevant for current pbft state, otherwise false */ - bool isPbftRelevantVote(const std::shared_ptr& vote) const; + bool isPbftRelevantVote(const std::shared_ptr& vote) const { + const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); + + if (vote->getPeriod() >= current_pbft_period && vote->getRound() >= current_pbft_round) { + // Standard current or future vote + return true; + } else if (vote->getPeriod() == current_pbft_period && vote->getRound() == (current_pbft_round - 1) && + vote->getType() == PbftVoteTypes::next_vote) { + // Previous round next vote + return true; + } else if (vote->getPeriod() == current_pbft_period - 1 && vote->getType() == PbftVoteTypes::cert_vote) { + // Previous period cert vote - potential reward vote + return true; + } + + return false; + } virtual void sendPbftVotesBundle(const 
std::shared_ptr& peer, - std::vector>&& votes); + std::vector>&& votes) { + if (votes.empty()) { + return; + } + + auto sendVotes = [this, &peer](std::vector>&& votes) { + auto packet = VotesBundlePacket{OptimizedPbftVotesBundle{.votes = std::move(votes)}}; + if (this->sealAndSend(peer->getId(), SubprotocolPacketType::kVotesBundlePacket, encodePacketRlp(packet))) { + LOG(this->log_dg_) << " Votes bundle with " << packet.votes_bundle.votes.size() << " votes sent to " + << peer->getId(); + for (const auto& vote : packet.votes_bundle.votes) { + peer->markPbftVoteAsKnown(vote->getHash()); + } + } + }; + + if (votes.size() <= kMaxVotesInBundleRlp) { + sendVotes(std::move(votes)); + return; + } else { + // Need to split votes into multiple packets + size_t index = 0; + while (index < votes.size()) { + const size_t votes_count = std::min(kMaxVotesInBundleRlp, votes.size() - index); + + const auto begin_it = std::next(votes.begin(), index); + const auto end_it = std::next(begin_it, votes_count); + + std::vector> votes_sub_vector; + std::move(begin_it, end_it, std::back_inserter(votes_sub_vector)); + + sendVotes(std::move(votes_sub_vector)); + + index += votes_count; + } + } + } private: /** @@ -64,7 +163,86 @@ class ExtVotesPacketHandler : public PacketHandler { */ std::pair validateVotePeriodRoundStep(const std::shared_ptr& vote, const std::shared_ptr& peer, - bool validate_max_round_step); + bool validate_max_round_step) { + const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); + + auto genErrMsg = [period = current_pbft_period, round = current_pbft_round, + step = pbft_mgr_->getPbftStep()](const std::shared_ptr& vote) -> std::string { + std::stringstream err; + err << "Vote " << vote->getHash() << " (period, round, step) = (" << vote->getPeriod() << ", " << vote->getRound() + << ", " << vote->getStep() << "). 
Current PBFT (period, round, step) = (" << period << ", " << round << ", " + << step << ")"; + return err.str(); + }; + + // Period validation + // vote->getPeriod() == current_pbft_period - 1 && cert_vote -> potential reward vote + if (vote->getPeriod() < current_pbft_period - 1 || + (vote->getPeriod() == current_pbft_period - 1 && vote->getType() != PbftVoteTypes::cert_vote)) { + return {false, "Invalid period(too small): " + genErrMsg(vote)}; + } else if (this->kConf.network.ddos_protection.vote_accepting_periods && + vote->getPeriod() - 1 > + current_pbft_period + this->kConf.network.ddos_protection.vote_accepting_periods) { + // skip this check if kConf.network.ddos_protection.vote_accepting_periods == 0 + // vote->getPeriod() - 1 is here because votes are validated against vote_period - 1 in dpos contract + // Do not request round sync too often here + if (vote->getVoter() == peer->getId() && + std::chrono::system_clock::now() - last_pbft_block_sync_request_time_ > kSyncRequestInterval) { + // request PBFT chain sync from this node + this->sealAndSend( + peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, + encodePacketRlp(GetPbftSyncPacket{std::max(vote->getPeriod() - 1, peer->pbft_chain_size_.load())})); + last_pbft_block_sync_request_time_ = std::chrono::system_clock::now(); + } + + return {false, "Invalid period(too big): " + genErrMsg(vote)}; + } + + // Round validation + auto checking_round = current_pbft_round; + // If period is not the same we assume current round is equal to 1 + // So we won't accept votes for future period with round bigger than kConf.network.vote_accepting_steps + if (current_pbft_period != vote->getPeriod()) { + checking_round = 1; + } + + // vote->getRound() == checking_round - 1 && next_vote -> previous round next vote + if (vote->getRound() < checking_round - 1 || + (vote->getRound() == checking_round - 1 && vote->getType() != PbftVoteTypes::next_vote)) { + return {false, "Invalid round(too small): " + genErrMsg(vote)}; + 
} else if (validate_max_round_step && this->kConf.network.ddos_protection.vote_accepting_rounds && + vote->getRound() >= checking_round + this->kConf.network.ddos_protection.vote_accepting_rounds) { + // skip this check if kConf.network.vote_accepting_rounds == 0 + // Trigger votes(round) syncing only if we are in sync in terms of period + if (current_pbft_period == vote->getPeriod()) { + // Do not request round sync too often here + if (vote->getVoter() == peer->getId() && + std::chrono::system_clock::now() - last_votes_sync_request_time_ > kSyncRequestInterval) { + // request round votes sync from this node + this->requestPbftNextVotesAtPeriodRound(peer->getId(), current_pbft_period, current_pbft_round); + last_votes_sync_request_time_ = std::chrono::system_clock::now(); + } + } + + return {false, "Invalid round(too big): " + genErrMsg(vote)}; + } + + // Step validation + auto checking_step = pbft_mgr_->getPbftStep(); + // If period or round is not the same we assume current step is equal to 1 + // So we won't accept votes for future rounds with step bigger than kConf.network.vote_accepting_steps + if (current_pbft_period != vote->getPeriod() || current_pbft_round != vote->getRound()) { + checking_step = 1; + } + + // skip check if kConf.network.vote_accepting_steps == 0 + if (validate_max_round_step && this->kConf.network.ddos_protection.vote_accepting_steps && + vote->getStep() >= checking_step + this->kConf.network.ddos_protection.vote_accepting_steps) { + return {false, "Invalid step(too big): " + genErrMsg(vote)}; + } + + return {true, ""}; + } /** * @brief Validates provided vote if voted value == provided block @@ -73,7 +251,14 @@ class ExtVotesPacketHandler : public PacketHandler { * @param pbft_block * @return true if validation successful, otherwise false */ - bool validateVoteAndBlock(const std::shared_ptr& vote, const std::shared_ptr& pbft_block) const; + bool validateVoteAndBlock(const std::shared_ptr& vote, const std::shared_ptr& pbft_block) const { 
+ if (pbft_block->getBlockHash() != vote->getBlockHash()) { + LOG(this->log_er_) << "Vote " << vote->getHash() << " voted block " << vote->getBlockHash() << " != actual block " + << pbft_block->getBlockHash(); + return false; + } + return true; + } protected: constexpr static size_t kMaxVotesInBundleRlp{1000}; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp index 6262b6b477..ed19f219c9 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp @@ -5,29 +5,42 @@ #include #include -#include "common/thread_pool.hpp" #include "exceptions.hpp" #include "logger/logger.hpp" #include "network/tarcap/packet_types.hpp" +#include "network/tarcap/packets/latest/get_next_votes_bundle_packet.hpp" +#include "network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "network/tarcap/shared_states/peers_state.hpp" +#include "network/tarcap/stats/time_period_packets_stats.hpp" #include "network/tarcap/taraxa_peer.hpp" #include "network/threadpool/packet_data.hpp" namespace taraxa::network::tarcap { -// Taraxa capability name -constexpr char TARAXA_CAPABILITY_NAME[] = "taraxa"; +template +PacketType decodePacketRlp(const dev::RLP& packet_rlp) { + return util::rlp_dec(packet_rlp); +} -class TimePeriodPacketsStats; +template +dev::bytes encodePacketRlp(PacketType packet) { + return util::rlp_enc(packet); +} /** * @brief Packet handler base class that consists of shared state and some commonly used functions */ -class PacketHandler { +template +class PacketHandler : public BasePacketHandler { public: PacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, 
std::shared_ptr packets_stats, const addr_t& node_addr, - const std::string& log_channel_name); + const std::string& log_channel_name) + : kConf(conf), peers_state_(std::move(peers_state)), packets_stats_(std::move(packets_stats)) { + LOG_OBJECTS_CREATE(log_channel_name); + } + virtual ~PacketHandler() = default; PacketHandler(const PacketHandler&) = default; PacketHandler(PacketHandler&&) = default; @@ -39,39 +52,133 @@ class PacketHandler { * * @param packet_data */ - void processPacket(const threadpool::PacketData& packet_data); - - void requestPbftNextVotesAtPeriodRound(const dev::p2p::NodeID& peerID, PbftPeriod pbft_period, PbftRound pbft_round); + // TODO: use unique_ptr for packet data for easier & quicker copying + void processPacket(const threadpool::PacketData& packet_data) override { + try { + const auto begin = std::chrono::steady_clock::now(); + + // It can rarely happen that packet was received and pushed into the queue when peer was still in peers map, + // in the meantime the connection was lost and we started to process packet from such peer + const auto peer = peers_state_->getPacketSenderPeer(packet_data.from_node_id_, packet_data.type_); + if (!peer.first) [[unlikely]] { + LOG(log_wr_) << "Unable to process packet. 
Reason: " << peer.second; + disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + return; + } + + // Decode packet rlp into packet object + auto packet = decodePacketRlp(packet_data.rlp_); + + // Main processing function + process(std::move(packet), peer.first); + + auto processing_duration = + std::chrono::duration_cast(std::chrono::steady_clock::now() - begin); + auto tp_wait_duration = std::chrono::duration_cast(std::chrono::steady_clock::now() - + packet_data.receive_time_); + + PacketStats packet_stats{1 /* count */, packet_data.rlp_.data().size(), processing_duration, tp_wait_duration}; + peer.first->addSentPacket(packet_data.type_str_, packet_stats); + + if (kConf.network.ddos_protection.log_packets_stats) { + packets_stats_->addReceivedPacket(packet_data.type_str_, packet_data.from_node_id_, packet_stats); + } + + } catch (const MaliciousPeerException& e) { + // thrown during packets processing -> malicious peer, invalid rlp items count, ... + // If there is custom peer set in exception, disconnect him, not packet sender + if (const auto custom_peer = e.getPeer(); custom_peer.has_value()) { + handle_caught_exception(e.what(), packet_data, *custom_peer, e.getDisconnectReason(), + true /* set peer as malicious */); + } else { + handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, e.getDisconnectReason(), + true /* set peer as malicious */); + } + } catch (const PacketProcessingException& e) { + // thrown during packets processing... 
+ handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, e.getDisconnectReason(), + true /* set peer as malicious */); + } catch (const dev::RLPException& e) { + // thrown during parsing inside aleth/libdevcore -> type mismatch + handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, dev::p2p::DisconnectReason::BadProtocol, + true /* set peer as malicious */); + } catch (const std::exception& e) { + handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_); + } catch (...) { + handle_caught_exception("Unknown exception", packet_data, packet_data.from_node_id_); + } + } + + // TODO: probably should not be here but in specific packet class ??? + void requestPbftNextVotesAtPeriodRound(const dev::p2p::NodeID& peerID, PbftPeriod pbft_period, PbftRound pbft_round) { + LOG(log_dg_) << "Sending GetNextVotesSyncPacket with period:" << pbft_period << ", round:" << pbft_round; + const auto packet = GetNextVotesBundlePacket{.peer_pbft_period = pbft_period, .peer_pbft_round = pbft_round}; + sealAndSend(peerID, SubprotocolPacketType::kGetNextVotesSyncPacket, encodePacketRlp(packet)); + } private: void handle_caught_exception(std::string_view exception_msg, const threadpool::PacketData& packet_data, const dev::p2p::NodeID& peer, dev::p2p::DisconnectReason disconnect_reason = dev::p2p::DisconnectReason::UserReason, - bool set_peer_as_malicious = false); + bool set_peer_as_malicious = false) { + LOG(log_er_) << "Exception caught during packet processing: " << exception_msg << " ." 
+ << "PacketData: " << jsonToUnstyledString(packet_data.getPacketDataJson()) + << ", disconnect peer: " << peer.toString(); - /** - * @brief Main packet processing function - */ - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) = 0; + if (set_peer_as_malicious) { + peers_state_->set_peer_malicious(peer); + } - /** - * @brief Validates packet rlp format - items count - * - * @throws InvalidRlpItemsCountException exception - */ - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const = 0; + disconnect(peer, disconnect_reason); + } - protected: /** - * @brief Checks if packet rlp is a list, if not it throws InvalidRlpItemsCountException - * - * @param packet_data - * @throws InvalidRlpItemsCountException exception + * @brief Main packet processing function */ - void checkPacketRlpIsList(const threadpool::PacketData& packet_data) const; + virtual void process(PacketType&& packet, const std::shared_ptr& peer) = 0; - bool sealAndSend(const dev::p2p::NodeID& nodeID, SubprotocolPacketType packet_type, dev::RLPStream&& rlp); - void disconnect(const dev::p2p::NodeID& node_id, dev::p2p::DisconnectReason reason); + protected: + bool sealAndSend(const dev::p2p::NodeID& node_id, SubprotocolPacketType packet_type, dev::bytes&& rlp_bytes) { + auto host = peers_state_->host_.lock(); + if (!host) { + LOG(log_er_) << "sealAndSend failed to obtain host"; + return false; + } + + if (const auto peer = peers_state_->getPacketSenderPeer(node_id, packet_type); !peer.first) [[unlikely]] { + LOG(log_wr_) << "Unable to send packet. 
Reason: " << peer.second; + host->disconnect(node_id, dev::p2p::UserReason); + return false; + } + + const auto begin = std::chrono::steady_clock::now(); + const size_t packet_size = rlp_bytes.size(); + + host->send(node_id, TARAXA_CAPABILITY_NAME, packet_type, std::move(rlp_bytes), + [begin, node_id, packet_size, packet_type, this]() { + if (!kConf.network.ddos_protection.log_packets_stats) { + return; + } + + PacketStats packet_stats{ + 1 /* count */, packet_size, + std::chrono::duration_cast(std::chrono::steady_clock::now() - begin), + std::chrono::microseconds{0}}; + + packets_stats_->addSentPacket(convertPacketTypeToString(packet_type), node_id, packet_stats); + }); + + return true; + } + + void disconnect(const dev::p2p::NodeID& node_id, dev::p2p::DisconnectReason reason) { + if (auto host = peers_state_->host_.lock(); host) { + LOG(log_nf_) << "Disconnect node " << node_id.abridged(); + host->disconnect(node_id, reason); + } else { + LOG(log_er_) << "Unable to disconnect node " << node_id.abridged() << " due to invalid host."; + } + } protected: // Node config diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp index c2e39296c0..14eca484ed 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp @@ -1,6 +1,7 @@ #pragma once #include "common/ext_syncing_packet_handler.hpp" +#include "network/tarcap/packets/latest/dag_block_packet.hpp" namespace taraxa { class TransactionManager; @@ -8,33 +9,29 @@ class TransactionManager; namespace taraxa::network::tarcap { -class TestState; - -class DagBlockPacketHandler : public ExtSyncingPacketHandler { +class DagBlockPacketHandler : public ExtSyncingPacketHandler { public: 
DagBlockPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, std::shared_ptr trx_mgr, std::shared_ptr db, - bool trxs_in_dag_packet, const addr_t &node_addr, const std::string &logs_prefix = ""); + const addr_t &node_addr, const std::string &logs_prefix = ""); - void sendBlock(dev::p2p::NodeID const &peer_id, DagBlock block, const SharedTransactions &trxs); - void sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, DagBlock block, const SharedTransactions &trxs); - void onNewBlockReceived(DagBlock &&block, const std::shared_ptr &peer = nullptr, + void sendBlockWithTransactions(const std::shared_ptr &peer, const std::shared_ptr &block, + SharedTransactions &&trxs); + void onNewBlockReceived(std::shared_ptr &&block, const std::shared_ptr &peer = nullptr, const std::unordered_map> &trxs = {}); - void onNewBlockVerified(const DagBlock &block, bool proposed, const SharedTransactions &trxs); + void onNewBlockVerified(const std::shared_ptr &block, bool proposed, const SharedTransactions &trxs); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::DagBlockPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagBlockPacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData &packet_data) const override; - virtual void process(const threadpool::PacketData &packet_data, const std::shared_ptr &peer) override; + virtual void process(DagBlockPacket &&packet, const std::shared_ptr &peer) override; protected: std::shared_ptr trx_mgr_{nullptr}; - const bool kTrxsInDagPacket; }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_sync_packet_handler.hpp 
b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_sync_packet_handler.hpp index 631e94fb8c..95009bef8c 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_sync_packet_handler.hpp @@ -1,6 +1,7 @@ #pragma once #include "common/ext_syncing_packet_handler.hpp" +#include "network/tarcap/packets/latest/dag_sync_packet.hpp" namespace taraxa { class TransactionManager; @@ -8,7 +9,7 @@ class TransactionManager; namespace taraxa::network::tarcap { -class DagSyncPacketHandler : public ExtSyncingPacketHandler { +class DagSyncPacketHandler : public ExtSyncingPacketHandler { public: DagSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -18,11 +19,10 @@ class DagSyncPacketHandler : public ExtSyncingPacketHandler { const addr_t& node_addr, const std::string& logs_prefix = ""); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::DagSyncPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagSyncPacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(DagSyncPacket&& packet, const std::shared_ptr& peer) override; protected: std::shared_ptr trx_mgr_{nullptr}; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp index 4c48aa779b..70bac09778 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp +++ 
b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp @@ -1,6 +1,8 @@ #pragma once #include "common/packet_handler.hpp" +#include "network/tarcap/packets/latest/get_dag_sync_packet.hpp" +#include "transaction/transaction.hpp" namespace taraxa { class DagManager; @@ -10,7 +12,7 @@ class TransactionManager; namespace taraxa::network::tarcap { -class GetDagSyncPacketHandler : public PacketHandler { +class GetDagSyncPacketHandler : public PacketHandler { public: GetDagSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -22,11 +24,10 @@ class GetDagSyncPacketHandler : public PacketHandler { SharedTransactions&& transactions, PbftPeriod request_period, PbftPeriod period); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::GetDagSyncPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetDagSyncPacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(GetDagSyncPacket&& packet, const std::shared_ptr& peer) override; protected: std::shared_ptr trx_mgr_; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.hpp index acabecec08..80780fdeba 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.hpp @@ -1,6 +1,7 @@ #pragma once #include "common/ext_votes_packet_handler.hpp" +#include 
"network/tarcap/packets/latest/get_next_votes_bundle_packet.hpp" namespace taraxa { class PbftManager; @@ -9,7 +10,7 @@ class VoteManager; namespace taraxa::network::tarcap { -class GetNextVotesBundlePacketHandler : public ExtVotesPacketHandler { +class GetNextVotesBundlePacketHandler : public ExtVotesPacketHandler { public: GetNextVotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -19,11 +20,10 @@ class GetNextVotesBundlePacketHandler : public ExtVotesPacketHandler { const std::string& logs_prefix = "GET_NEXT_VOTES_BUNDLE_PH"); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::GetNextVotesSyncPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetNextVotesSyncPacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(GetNextVotesBundlePacket&& packet, const std::shared_ptr& peer) override; }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.hpp index 763bebb372..1d8d170fd3 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.hpp @@ -1,6 +1,7 @@ #pragma once #include "common/packet_handler.hpp" +#include "network/tarcap/packets/latest/get_pbft_sync_packet.hpp" namespace taraxa { class PbftChain; @@ -12,7 +13,7 @@ namespace taraxa::network::tarcap { class PbftSyncingState; -class GetPbftSyncPacketHandler : public 
PacketHandler { +class GetPbftSyncPacketHandler : public PacketHandler { public: GetPbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -21,11 +22,10 @@ class GetPbftSyncPacketHandler : public PacketHandler { const addr_t& node_addr, const std::string& logs_prefix = "GET_PBFT_SYNC_PH"); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::GetPbftSyncPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetPbftSyncPacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(GetPbftSyncPacket&& packet, const std::shared_ptr& peer) override; virtual void sendPbftBlocks(const std::shared_ptr& peer, PbftPeriod from_period, size_t blocks_to_transfer, bool pbft_chain_synced); diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp index f487db5e30..c3d5fbaa2d 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp @@ -1,12 +1,12 @@ #pragma once #include "common/packet_handler.hpp" -#include "pillar_chain/pillar_block.hpp" +#include "network/tarcap/packets/latest/get_pillar_votes_bundle_packet.hpp" #include "pillar_chain/pillar_chain_manager.hpp" namespace taraxa::network::tarcap { -class GetPillarVotesBundlePacketHandler : public PacketHandler { +class GetPillarVotesBundlePacketHandler : public PacketHandler { public: 
GetPillarVotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -17,15 +17,12 @@ class GetPillarVotesBundlePacketHandler : public PacketHandler { const std::shared_ptr& peer); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::GetPillarVotesBundlePacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetPillarVotesBundlePacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(GetPillarVotesBundlePacket&& packet, const std::shared_ptr& peer) override; protected: - constexpr static size_t kGetPillarVotesBundlePacketSize{2}; - std::shared_ptr pillar_chain_manager_; }; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp index 2fc0fb5a25..bfd065bb12 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp @@ -1,11 +1,13 @@ #pragma once #include "common/ext_syncing_packet_handler.hpp" +#include "common/thread_pool.hpp" +#include "network/tarcap/packets/latest/pbft_sync_packet.hpp" #include "vote_manager/vote_manager.hpp" namespace taraxa::network::tarcap { -class PbftSyncPacketHandler : public ExtSyncingPacketHandler { +class PbftSyncPacketHandler : public ExtSyncingPacketHandler { public: PbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -17,26 +19,22 @@ class PbftSyncPacketHandler : public ExtSyncingPacketHandler { void 
handleMaliciousSyncPeer(const dev::p2p::NodeID& id); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::PbftSyncPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPbftSyncPacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(PbftSyncPacket&& packet, const std::shared_ptr& peer) override; protected: virtual PeriodData decodePeriodData(const dev::RLP& period_data_rlp) const; virtual std::vector> decodeVotesBundle(const dev::RLP& votes_bundle_rlp) const; void pbftSyncComplete(); - void delayedPbftSync(int counter); + void delayedPbftSync(uint32_t counter); static constexpr uint32_t kDelayedPbftSyncDelayMs = 10; std::shared_ptr vote_mgr_; util::ThreadPool periodic_events_tp_; - - static constexpr size_t kStandardPacketSize = 2; - static constexpr size_t kChainSyncedPacketSize = 3; }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_vote_packet_handler.hpp index 18f64d1767..028a472cb4 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_vote_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_vote_packet_handler.hpp @@ -1,10 +1,11 @@ #pragma once +#include "network/tarcap/packets/latest/pillar_vote_packet.hpp" #include "network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp" namespace taraxa::network::tarcap { -class PillarVotePacketHandler : public ExtPillarVotePacketHandler { +class PillarVotePacketHandler : public ExtPillarVotePacketHandler { public: 
PillarVotePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -15,11 +16,10 @@ class PillarVotePacketHandler : public ExtPillarVotePacketHandler { void sendPillarVote(const std::shared_ptr& peer, const std::shared_ptr& vote); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::PillarVotePacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPillarVotePacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(PillarVotePacket&& packet, const std::shared_ptr& peer) override; }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp index 15a9ccfff7..4a7b521ebf 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp @@ -1,10 +1,11 @@ #pragma once +#include "network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp" #include "network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp" namespace taraxa::network::tarcap { -class PillarVotesBundlePacketHandler : public ExtPillarVotePacketHandler { +class PillarVotesBundlePacketHandler : public ExtPillarVotePacketHandler { public: PillarVotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -12,14 +13,12 @@ class PillarVotesBundlePacketHandler : public ExtPillarVotePacketHandler { const 
addr_t& node_addr, const std::string& logs_prefix); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::PillarVotesBundlePacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPillarVotesBundlePacket; - private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; - - public: constexpr static size_t kMaxPillarVotesInBundleRlp{250}; + + private: + virtual void process(PillarVotesBundlePacket&& packet, const std::shared_ptr& peer) override; }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/status_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/status_packet_handler.hpp index 755db336be..24af6fb7bd 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/status_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/status_packet_handler.hpp @@ -1,10 +1,11 @@ #pragma once #include "common/ext_syncing_packet_handler.hpp" +#include "network/tarcap/packets/latest/status_packet.hpp" namespace taraxa::network::tarcap { -class StatusPacketHandler : public ExtSyncingPacketHandler { +class StatusPacketHandler : public ExtSyncingPacketHandler { public: StatusPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -17,16 +18,12 @@ class StatusPacketHandler : public ExtSyncingPacketHandler { void sendStatusToPeers(); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::StatusPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kStatusPacket; private: - virtual void 
validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(StatusPacket&& packet, const std::shared_ptr& peer) override; protected: - static constexpr uint16_t kInitialStatusPacketItemsCount = 11; - static constexpr uint16_t kStandardStatusPacketItemsCount = 4; - const h256 kGenesisHash; }; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp index ed9d7e8a3d..f5a4c45e94 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp @@ -1,7 +1,7 @@ #pragma once #include "common/packet_handler.hpp" -#include "dag/dag_block.hpp" +#include "network/tarcap/packets/latest/transaction_packet.hpp" #include "transaction/transaction.hpp" namespace taraxa { @@ -11,13 +11,11 @@ enum class TransactionStatus; namespace taraxa::network::tarcap { -class TestState; - -class TransactionPacketHandler : public PacketHandler { +class TransactionPacketHandler : public PacketHandler { public: TransactionPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, - std::shared_ptr trx_mgr, const addr_t& node_addr, bool hash_gossip, + std::shared_ptr trx_mgr, const addr_t& node_addr, const std::string& logs_prefix = "TRANSACTION_PH"); /** @@ -39,25 +37,12 @@ class TransactionPacketHandler : public PacketHandler { void periodicSendTransactions(std::vector&& transactions); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::TransactionPacket; - - // 2 items: hashes and transactions - static 
constexpr uint32_t kTransactionPacketItemCount = 2; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kTransactionPacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(TransactionPacket&& packet, const std::shared_ptr& peer) override; protected: - /** - * @brief Sends batch of transactions to all connected peers - * @note Support of the old V2 version, remove once most of the network is updated or after a hardfork. This method is - * used as periodic event to broadcast transactions to the other peers in network - * - * @param transactions to be sent - */ - void periodicSendTransactionsWithoutHashGossip(std::vector&& transactions); - /** * @brief select which transactions and hashes to send to which connected peer * @@ -83,7 +68,6 @@ class TransactionPacketHandler : public PacketHandler { std::atomic received_trx_count_{0}; std::atomic unique_received_trx_count_{0}; - const bool kHashGossip = true; }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/vote_packet_handler.hpp index 1c5bb1b18a..d88c9a2f6e 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/vote_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/vote_packet_handler.hpp @@ -1,10 +1,11 @@ #pragma once #include "common/ext_votes_packet_handler.hpp" +#include "network/tarcap/packets/latest/vote_packet.hpp" namespace taraxa::network::tarcap { -class VotePacketHandler : public ExtVotesPacketHandler { +class VotePacketHandler : public ExtVotesPacketHandler { public: VotePacketHandler(const FullNodeConfig& conf, std::shared_ptr 
peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, @@ -25,15 +26,10 @@ class VotePacketHandler : public ExtVotesPacketHandler { const std::shared_ptr& block); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::VotePacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotePacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; - - protected: - const size_t kVotePacketSize{1}; - const size_t kExtendedVotePacketSize{3}; + virtual void process(VotePacket&& packet, const std::shared_ptr& peer) override; }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp index 629485166d..daef9e4b9a 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp @@ -1,10 +1,11 @@ #pragma once #include "common/ext_votes_packet_handler.hpp" +#include "network/tarcap/packets/latest/votes_bundle_packet.hpp" namespace taraxa::network::tarcap { -class VotesBundlePacketHandler : public ExtVotesPacketHandler { +class VotesBundlePacketHandler : public ExtVotesPacketHandler { public: VotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, @@ -23,11 +24,10 @@ class VotesBundlePacketHandler : public ExtVotesPacketHandler { const std::optional& exclude_node = {}); // Packet type that is processed by this handler - static constexpr 
SubprotocolPacketType kPacketType_ = SubprotocolPacketType::VotesBundlePacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotesBundlePacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(VotesBundlePacket&& packet, const std::shared_ptr& peer) override; }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_pillar_vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_pillar_vote_packet_handler.hpp new file mode 100644 index 0000000000..bee00ed59f --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_pillar_vote_packet_handler.hpp @@ -0,0 +1,35 @@ +#pragma once + +#include "packet_handler.hpp" + +namespace taraxa { +class KeyManager; +class PillarVote; + +namespace pillar_chain { +class PillarChainManager; +} // namespace pillar_chain + +namespace final_chain { +class FinalChain; +} + +} // namespace taraxa + +namespace taraxa::network::tarcap::v3 { + +class ExtPillarVotePacketHandler : public PacketHandler { + public: + ExtPillarVotePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pillar_chain_manager, + const addr_t& node_addr, const std::string& log_channel); + + protected: + bool processPillarVote(const std::shared_ptr& vote, const std::shared_ptr& peer); + + protected: + std::shared_ptr pillar_chain_manager_; +}; + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_syncing_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_syncing_packet_handler.hpp 
new file mode 100644 index 0000000000..ce8835ed86 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_syncing_packet_handler.hpp @@ -0,0 +1,66 @@ +#pragma once + +#include "dag/dag_manager.hpp" +#include "packet_handler.hpp" + +namespace taraxa { +class PbftChain; +class PbftManager; +class DagManager; +class DbStorage; +} // namespace taraxa + +namespace taraxa::network::tarcap { +class PbftSyncingState; +} + +namespace taraxa::network::tarcap::v3 { + +/** + * @brief ExtSyncingPacketHandler is extended abstract PacketHandler with added functions that are used in packet + * handlers that need to interact with syncing process in some way + */ +class ExtSyncingPacketHandler : public PacketHandler { + public: + ExtSyncingPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, + std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, + std::shared_ptr db, const addr_t &node_addr, const std::string &log_channel_name); + + virtual ~ExtSyncingPacketHandler() = default; + ExtSyncingPacketHandler &operator=(const ExtSyncingPacketHandler &) = delete; + ExtSyncingPacketHandler &operator=(ExtSyncingPacketHandler &&) = delete; + + /** + * @brief Start syncing pbft if needed + * + */ + void startSyncingPbft(); + + /** + * @brief Send sync request to the current syncing peer with specified request_period + * + * @param request_period + * + * @return true if sync request was sent, otherwise false + */ + bool syncPeerPbft(PbftPeriod request_period); + + void requestDagBlocks(const dev::p2p::NodeID &_nodeID, const std::unordered_set &blocks, + PbftPeriod period); + void requestPendingDagBlocks(std::shared_ptr peer = nullptr); + + std::shared_ptr getMaxChainPeer(std::function &)> filter_func = + [](const std::shared_ptr &) { return true; }); + + protected: + std::shared_ptr pbft_syncing_state_{nullptr}; + + std::shared_ptr 
pbft_chain_{nullptr}; + std::shared_ptr pbft_mgr_{nullptr}; + std::shared_ptr dag_mgr_{nullptr}; + std::shared_ptr db_{nullptr}; +}; + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_votes_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_votes_packet_handler.hpp new file mode 100644 index 0000000000..827315d441 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_votes_packet_handler.hpp @@ -0,0 +1,91 @@ +#pragma once + +#include "packet_handler.hpp" + +namespace taraxa { +class PbftVote; +class PbftManager; +class PbftChain; +class PbftBlock; +class VoteManager; +class SlashingManager; +} // namespace taraxa + +namespace taraxa::network::tarcap::v3 { + +/** + * @brief ExtVotesPacketHandler is extended abstract PacketHandler with added functions that are used in packet + * handlers that process pbft votes + */ +class ExtVotesPacketHandler : public PacketHandler { + public: + ExtVotesPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, + std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, + std::shared_ptr slashing_manager, const addr_t& node_addr, + const std::string& log_channel_name); + + virtual ~ExtVotesPacketHandler() = default; + ExtVotesPacketHandler(const ExtVotesPacketHandler&) = delete; + ExtVotesPacketHandler(ExtVotesPacketHandler&&) = delete; + ExtVotesPacketHandler& operator=(const ExtVotesPacketHandler&) = delete; + ExtVotesPacketHandler& operator=(ExtVotesPacketHandler&&) = delete; + + /** + * @brief Process vote + * + * @param vote + * @param pbft_block + * @param peer + * @param validate_max_round_step + * @return if vote was successfully processed, otherwise false + */ + bool processVote(const std::shared_ptr& vote, const std::shared_ptr& pbft_block, + const std::shared_ptr& 
peer, bool validate_max_round_step); + + /** + * @brief Checks is vote is relevant for current pbft state in terms of period, round and type + * @param vote + * @return true if vote is relevant for current pbft state, otherwise false + */ + bool isPbftRelevantVote(const std::shared_ptr& vote) const; + + virtual void sendPbftVotesBundle(const std::shared_ptr& peer, + std::vector>&& votes); + + private: + /** + * @brief Validates vote period, round and step against max values from config + * + * @param vote to be validated + * @param peer + * @param validate_max_round_step validate also max round and step + * @return vote validation passed, otherwise + */ + std::pair validateVotePeriodRoundStep(const std::shared_ptr& vote, + const std::shared_ptr& peer, + bool validate_max_round_step); + + /** + * @brief Validates provided vote if voted value == provided block + * + * @param vote + * @param pbft_block + * @return true if validation successful, otherwise false + */ + bool validateVoteAndBlock(const std::shared_ptr& vote, const std::shared_ptr& pbft_block) const; + + protected: + constexpr static size_t kMaxVotesInBundleRlp{1000}; + constexpr static std::chrono::seconds kSyncRequestInterval = std::chrono::seconds(10); + + mutable std::chrono::system_clock::time_point last_votes_sync_request_time_; + mutable std::chrono::system_clock::time_point last_pbft_block_sync_request_time_; + + std::shared_ptr pbft_mgr_; + std::shared_ptr pbft_chain_; + std::shared_ptr vote_mgr_; + std::shared_ptr slashing_manager_; +}; + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/packet_handler.hpp new file mode 100644 index 0000000000..6cd6d92c94 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/packet_handler.hpp @@ -0,0 +1,86 @@ +#pragma once + +#include + 
+#include +#include + +#include "logger/logger.hpp" +#include "network/tarcap/packet_types.hpp" +#include "network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" +#include "network/tarcap/shared_states/peers_state.hpp" +#include "network/tarcap/taraxa_peer.hpp" +#include "network/threadpool/packet_data.hpp" + +namespace taraxa::network::tarcap::v3 { + +// class TimePeriodPacketsStats; + +/** + * @brief Packet handler base class that consists of shared state and some commonly used functions + */ +class PacketHandler : public BasePacketHandler { + public: + PacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, const addr_t& node_addr, + const std::string& log_channel_name); + virtual ~PacketHandler() = default; + PacketHandler(const PacketHandler&) = default; + PacketHandler(PacketHandler&&) = default; + PacketHandler& operator=(const PacketHandler&) = delete; + PacketHandler& operator=(PacketHandler&&) = delete; + + /** + * @brief Packet processing function wrapper that logs packet stats and calls process function + * + * @param packet_data + */ + void processPacket(const threadpool::PacketData& packet_data); + + void requestPbftNextVotesAtPeriodRound(const dev::p2p::NodeID& peerID, PbftPeriod pbft_period, PbftRound pbft_round); + + private: + void handle_caught_exception(std::string_view exception_msg, const threadpool::PacketData& packet_data, + const dev::p2p::NodeID& peer, + dev::p2p::DisconnectReason disconnect_reason = dev::p2p::DisconnectReason::UserReason, + bool set_peer_as_malicious = false); + + /** + * @brief Main packet processing function + */ + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) = 0; + + /** + * @brief Validates packet rlp format - items count + * + * @throws InvalidRlpItemsCountException exception + */ + virtual void validatePacketRlpFormat(const 
threadpool::PacketData& packet_data) const = 0; + + protected: + /** + * @brief Checks if packet rlp is a list, if not it throws InvalidRlpItemsCountException + * + * @param packet_data + * @throws InvalidRlpItemsCountException exception + */ + void checkPacketRlpIsList(const threadpool::PacketData& packet_data) const; + + bool sealAndSend(const dev::p2p::NodeID& nodeID, SubprotocolPacketType packet_type, dev::RLPStream&& rlp); + void disconnect(const dev::p2p::NodeID& node_id, dev::p2p::DisconnectReason reason); + + protected: + // Node config + const FullNodeConfig& kConf; + + std::shared_ptr peers_state_{nullptr}; + + // Shared packet stats + std::shared_ptr packets_stats_; + + // Declare logger instances + LOG_OBJECTS_DEFINE +}; + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/dag_block_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/dag_block_packet_handler.hpp new file mode 100644 index 0000000000..8cb9dbec48 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/dag_block_packet_handler.hpp @@ -0,0 +1,37 @@ +#pragma once + +#include "common/ext_syncing_packet_handler.hpp" + +namespace taraxa { +class TransactionManager; +} // namespace taraxa + +namespace taraxa::network::tarcap::v3 { + +class DagBlockPacketHandler : public ExtSyncingPacketHandler { + public: + DagBlockPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, + std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, + std::shared_ptr trx_mgr, std::shared_ptr db, + const addr_t &node_addr, const std::string &logs_prefix = ""); + + void sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, const std::shared_ptr &block, + const SharedTransactions &trxs); + void onNewBlockReceived(std::shared_ptr &&block, const std::shared_ptr 
&peer = nullptr, + const std::unordered_map> &trxs = {}); + void onNewBlockVerified(const std::shared_ptr &block, bool proposed, const SharedTransactions &trxs); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagBlockPacket; + + private: + virtual void validatePacketRlpFormat(const threadpool::PacketData &packet_data) const override; + virtual void process(const threadpool::PacketData &packet_data, const std::shared_ptr &peer) override; + + protected: + std::shared_ptr trx_mgr_{nullptr}; +}; + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/dag_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/dag_sync_packet_handler.hpp new file mode 100644 index 0000000000..2b00fd089b --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/dag_sync_packet_handler.hpp @@ -0,0 +1,31 @@ +#pragma once + +#include "common/ext_syncing_packet_handler.hpp" + +namespace taraxa { +class TransactionManager; +} // namespace taraxa + +namespace taraxa::network::tarcap::v3 { + +class DagSyncPacketHandler : public ExtSyncingPacketHandler { + public: + DagSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, + std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, + std::shared_ptr trx_mgr, std::shared_ptr db, + const addr_t& node_addr, const std::string& logs_prefix = ""); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagSyncPacket; + + private: + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + + 
protected: + std::shared_ptr trx_mgr_{nullptr}; +}; + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_dag_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_dag_sync_packet_handler.hpp new file mode 100644 index 0000000000..c8e3cc9ee3 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_dag_sync_packet_handler.hpp @@ -0,0 +1,38 @@ +#pragma once + +#include "common/packet_handler.hpp" +#include "transaction/transaction.hpp" + +namespace taraxa { +class DagManager; +class DbStorage; +class TransactionManager; +} // namespace taraxa + +namespace taraxa::network::tarcap::v3 { + +class GetDagSyncPacketHandler : public PacketHandler { + public: + GetDagSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr trx_mgr, std::shared_ptr dag_mgr, + std::shared_ptr db, const addr_t& node_addr, + const std::string& logs_prefix = "GET_DAG_SYNC_PH"); + + void sendBlocks(const dev::p2p::NodeID& peer_id, std::vector>&& blocks, + SharedTransactions&& transactions, PbftPeriod request_period, PbftPeriod period); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetDagSyncPacket; + + private: + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + + protected: + std::shared_ptr trx_mgr_; + std::shared_ptr dag_mgr_; + std::shared_ptr db_; +}; + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_next_votes_bundle_packet_handler.hpp 
b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_next_votes_bundle_packet_handler.hpp new file mode 100644 index 0000000000..e154d7d418 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_next_votes_bundle_packet_handler.hpp @@ -0,0 +1,29 @@ +#pragma once + +#include "common/ext_votes_packet_handler.hpp" + +namespace taraxa { +class PbftManager; +class VoteManager; +} // namespace taraxa + +namespace taraxa::network::tarcap::v3 { + +class GetNextVotesBundlePacketHandler : public ExtVotesPacketHandler { + public: + GetNextVotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_mgr, std::shared_ptr pbft_chain, + std::shared_ptr vote_mgr, + std::shared_ptr slashing_manager, const addr_t& node_addr, + const std::string& logs_prefix = "GET_NEXT_VOTES_BUNDLE_PH"); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetNextVotesSyncPacket; + + private: + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; +}; + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp new file mode 100644 index 0000000000..388eef0b6a --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp @@ -0,0 +1,34 @@ +#pragma once +#include "../v3/common/packet_handler.hpp" +namespace taraxa { +class PbftChain; +class DbStorage; +class VoteManager; +} // namespace taraxa +namespace taraxa::network::tarcap { +class PbftSyncingState; +} +namespace 
taraxa::network::tarcap::v3 { +class GetPbftSyncPacketHandler : public PacketHandler { + public: + GetPbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, + std::shared_ptr vote_mgr, std::shared_ptr db, + const addr_t& node_addr, const std::string& logs_prefix = "GET_PBFT_SYNC_PH"); + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetPbftSyncPacket; + + private: + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void sendPbftBlocks(const std::shared_ptr& peer, PbftPeriod from_period, + size_t blocks_to_transfer, bool pbft_chain_synced); + + protected: + std::shared_ptr pbft_syncing_state_; + std::shared_ptr pbft_chain_; + std::shared_ptr vote_mgr_; + std::shared_ptr db_; +}; +} // namespace taraxa::network::tarcap::v3 \ No newline at end of file diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pillar_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pillar_votes_bundle_packet_handler.hpp new file mode 100644 index 0000000000..eb48aecc68 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pillar_votes_bundle_packet_handler.hpp @@ -0,0 +1,31 @@ +#pragma once + +#include "common/packet_handler.hpp" +#include "pillar_chain/pillar_chain_manager.hpp" + +namespace taraxa::network::tarcap::v3 { + +class GetPillarVotesBundlePacketHandler : public PacketHandler { + public: + GetPillarVotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pillar_chain_manager, + const addr_t& node_addr, const 
std::string& logs_prefix); + + void requestPillarVotesBundle(PbftPeriod period, const blk_hash_t& pillar_block_hash, + const std::shared_ptr& peer); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetPillarVotesBundlePacket; + + private: + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + + protected: + constexpr static size_t kGetPillarVotesBundlePacketSize{2}; + + std::shared_ptr pillar_chain_manager_; +}; + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp new file mode 100644 index 0000000000..25056f8018 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp @@ -0,0 +1,33 @@ +#pragma once +#include "../v3/common/ext_syncing_packet_handler.hpp" +#include "common/thread_pool.hpp" +#include "vote_manager/vote_manager.hpp" +namespace taraxa::network::tarcap::v3 { +class PbftSyncPacketHandler : public ExtSyncingPacketHandler { + public: + PbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, + std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, + std::shared_ptr vote_mgr, std::shared_ptr db, const addr_t& node_addr, + const std::string& logs_prefix = "PBFT_SYNC_PH"); + void handleMaliciousSyncPeer(const dev::p2p::NodeID& id); + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPbftSyncPacket; + + private: + virtual void validatePacketRlpFormat(const 
threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + + protected: + virtual PeriodData decodePeriodData(const dev::RLP& period_data_rlp) const; + virtual std::vector> decodeVotesBundle(const dev::RLP& votes_bundle_rlp) const; + void pbftSyncComplete(); + void delayedPbftSync(uint32_t counter); + static constexpr uint32_t kDelayedPbftSyncDelayMs = 10; + std::shared_ptr vote_mgr_; + util::ThreadPool periodic_events_tp_; + static constexpr size_t kStandardPacketSize = 2; + static constexpr size_t kChainSyncedPacketSize = 3; +}; +} // namespace taraxa::network::tarcap::v3 \ No newline at end of file diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pillar_vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pillar_vote_packet_handler.hpp new file mode 100644 index 0000000000..ccec8fc878 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pillar_vote_packet_handler.hpp @@ -0,0 +1,25 @@ +#pragma once + +#include "network/tarcap/packets_handlers/v3/common/ext_pillar_vote_packet_handler.hpp" + +namespace taraxa::network::tarcap::v3 { + +class PillarVotePacketHandler : public ExtPillarVotePacketHandler { + public: + PillarVotePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pillar_chain_manager, + const addr_t& node_addr, const std::string& logs_prefix); + + void onNewPillarVote(const std::shared_ptr& vote, bool rebroadcast = false); + void sendPillarVote(const std::shared_ptr& peer, const std::shared_ptr& vote); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPillarVotePacket; + + private: + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual 
void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; +}; + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pillar_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pillar_votes_bundle_packet_handler.hpp new file mode 100644 index 0000000000..efa63c4684 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pillar_votes_bundle_packet_handler.hpp @@ -0,0 +1,25 @@ +#pragma once + +#include "network/tarcap/packets_handlers/v3/common/ext_pillar_vote_packet_handler.hpp" + +namespace taraxa::network::tarcap::v3 { + +class PillarVotesBundlePacketHandler : public ExtPillarVotePacketHandler { + public: + PillarVotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pillar_chain_manager, + const addr_t& node_addr, const std::string& logs_prefix); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPillarVotesBundlePacket; + + private: + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + + public: + constexpr static size_t kMaxPillarVotesInBundleRlp{250}; +}; + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/status_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/status_packet_handler.hpp new file mode 100644 index 0000000000..6e19de6324 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/status_packet_handler.hpp @@ -0,0 +1,33 @@ +#pragma once + +#include "common/ext_syncing_packet_handler.hpp" + 
+namespace taraxa::network::tarcap::v3 { + +class StatusPacketHandler : public ExtSyncingPacketHandler { + public: + StatusPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, + std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, + std::shared_ptr db, h256 genesis_hash, const addr_t& node_addr, + const std::string& logs_prefix = "STATUS_PH"); + + bool sendStatus(const dev::p2p::NodeID& node_id, bool initial); + void sendStatusToPeers(); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kStatusPacket; + + private: + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + + protected: + static constexpr uint16_t kInitialStatusPacketItemsCount = 11; + static constexpr uint16_t kStandardStatusPacketItemsCount = 4; + + const h256 kGenesisHash; +}; + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/transaction_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/transaction_packet_handler.hpp new file mode 100644 index 0000000000..6db64feb81 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/transaction_packet_handler.hpp @@ -0,0 +1,78 @@ +#pragma once + +#include "common/packet_handler.hpp" +#include "transaction/transaction.hpp" + +namespace taraxa { +class TransactionManager; +enum class TransactionStatus; +} // namespace taraxa + +namespace taraxa::network::tarcap::v3 { + +class TestState; + +class TransactionPacketHandler : public PacketHandler { + public: + TransactionPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + 
std::shared_ptr trx_mgr, const addr_t& node_addr, + const std::string& logs_prefix = "TRANSACTION_PH"); + + /** + * @brief Send transactions + * + * @param peer peer to send transactions to + * @param transactions serialized transactions + * + */ + void sendTransactions(std::shared_ptr peer, + std::pair>&& transactions); + + /** + * @brief Sends batch of transactions to all connected peers + * @note This method is used as periodic event to broadcast transactions to the other peers in network + * + * @param transactions to be sent + */ + void periodicSendTransactions(std::vector&& transactions); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kTransactionPacket; + + // 2 items: hashes and transactions + static constexpr uint32_t kTransactionPacketItemCount = 2; + + private: + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + + protected: + /** + * @brief select which transactions and hashes to send to which connected peer + * + * @param transactions to be sent + * @return selected transactions and hashes to be sent per peer + */ + std::vector, std::pair>>> + transactionsToSendToPeers(std::vector&& transactions); + + /** + * @brief select which transactions and hashes to send to peer + * + * @param peer + * @param transactions grouped per account to be sent + * @param account_start_index which account to start with + * @return index of the next account to continue and selected transactions and hashes to be sent per peer + */ + std::pair>> transactionsToSendToPeer( + std::shared_ptr peer, const std::vector& transactions, + uint32_t account_start_index); + + std::shared_ptr trx_mgr_; + + std::atomic received_trx_count_{0}; + std::atomic unique_received_trx_count_{0}; +}; + +} // namespace taraxa::network::tarcap::v3 diff --git 
a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/vote_packet_handler.hpp new file mode 100644 index 0000000000..26796cc6d0 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/vote_packet_handler.hpp @@ -0,0 +1,39 @@ +#pragma once + +#include "common/ext_votes_packet_handler.hpp" + +namespace taraxa::network::tarcap::v3 { + +class VotePacketHandler : public ExtVotesPacketHandler { + public: + VotePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, + std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, + std::shared_ptr slashing_manager, const addr_t& node_addr, + const std::string& logs_prefix = ""); + + /** + * @brief Sends pbft vote to connected peers + * + * @param vote Votes to send + * @param block block to send - nullptr means no block + * @param rebroadcast - send even of vote i known for the peer + */ + void onNewPbftVote(const std::shared_ptr& vote, const std::shared_ptr& block, + bool rebroadcast = false); + void sendPbftVote(const std::shared_ptr& peer, const std::shared_ptr& vote, + const std::shared_ptr& block); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotePacket; + + private: + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + + protected: + const size_t kVotePacketSize{1}; + const size_t kExtendedVotePacketSize{3}; +}; + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/votes_bundle_packet_handler.hpp 
b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/votes_bundle_packet_handler.hpp new file mode 100644 index 0000000000..972e9e64da --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/votes_bundle_packet_handler.hpp @@ -0,0 +1,33 @@ +#pragma once + +#include "common/ext_votes_packet_handler.hpp" + +namespace taraxa::network::tarcap::v3 { + +class VotesBundlePacketHandler : public ExtVotesPacketHandler { + public: + VotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, + std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, + std::shared_ptr slashing_manager, const addr_t& node_addr, + const std::string& logs_prefix = ""); + + /** + * @brief Sends pbft votes bundle to connected peers + * + * @param votes Votes to send + * @param rebroadcast if rebroadcast is true, all votes are resent to all peers + * @param exclude_node do not send votes to excluded node + */ + void onNewPbftVotesBundle(const std::vector>& votes, bool rebroadcast = false, + const std::optional& exclude_node = {}); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotesBundlePacket; + + private: + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; +}; + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/shared_states/pbft_syncing_state.hpp b/libraries/core_libs/network/include/network/tarcap/shared_states/pbft_syncing_state.hpp index 0c6145a77d..c6681cbfdb 100644 --- a/libraries/core_libs/network/include/network/tarcap/shared_states/pbft_syncing_state.hpp +++ b/libraries/core_libs/network/include/network/tarcap/shared_states/pbft_syncing_state.hpp @@ -1,9 +1,10 @@ 
#pragma once #include +#include +#include -#include "common/util.hpp" -#include "libp2p/Common.h" +#include "common/types.hpp" namespace taraxa::network::tarcap { diff --git a/libraries/core_libs/network/include/network/tarcap/shared_states/peers_state.hpp b/libraries/core_libs/network/include/network/tarcap/shared_states/peers_state.hpp index ec89429fdc..a8411b3e72 100644 --- a/libraries/core_libs/network/include/network/tarcap/shared_states/peers_state.hpp +++ b/libraries/core_libs/network/include/network/tarcap/shared_states/peers_state.hpp @@ -2,13 +2,11 @@ #include "common/util.hpp" #include "config/config.hpp" -#include "dag/dag_block.hpp" #include "libp2p/Common.h" #include "libp2p/Host.h" #include "network/tarcap/packet_types.hpp" #include "network/tarcap/stats/time_period_packets_stats.hpp" #include "network/tarcap/taraxa_peer.hpp" -#include "transaction/transaction.hpp" namespace taraxa::network::tarcap { diff --git a/libraries/core_libs/network/include/network/tarcap/stats/max_stats.hpp b/libraries/core_libs/network/include/network/tarcap/stats/max_stats.hpp index 1cabc4bd3b..dadb180309 100644 --- a/libraries/core_libs/network/include/network/tarcap/stats/max_stats.hpp +++ b/libraries/core_libs/network/include/network/tarcap/stats/max_stats.hpp @@ -1,6 +1,7 @@ #pragma once -#include "json/value.h" +#include + #include "network/tarcap/stats/packet_stats.hpp" namespace taraxa::network::tarcap { diff --git a/libraries/core_libs/network/include/network/tarcap/stats/node_stats.hpp b/libraries/core_libs/network/include/network/tarcap/stats/node_stats.hpp index c6feffb50e..bd66040358 100644 --- a/libraries/core_libs/network/include/network/tarcap/stats/node_stats.hpp +++ b/libraries/core_libs/network/include/network/tarcap/stats/node_stats.hpp @@ -1,7 +1,8 @@ #pragma once +#include + #include "common/types.hpp" -#include "json/value.h" #include "logger/logger.hpp" #include "network/tarcap/tarcap_version.hpp" diff --git 
a/libraries/core_libs/network/include/network/tarcap/stats/packet_stats.hpp b/libraries/core_libs/network/include/network/tarcap/stats/packet_stats.hpp index bc00fa9fb3..32c962f05c 100644 --- a/libraries/core_libs/network/include/network/tarcap/stats/packet_stats.hpp +++ b/libraries/core_libs/network/include/network/tarcap/stats/packet_stats.hpp @@ -1,11 +1,10 @@ #pragma once +#include #include #include -#include "json/value.h" - namespace taraxa::network::tarcap { /** diff --git a/libraries/core_libs/network/include/network/tarcap/stats/packets_stats.hpp b/libraries/core_libs/network/include/network/tarcap/stats/packets_stats.hpp index fd1db55f78..45fce26778 100644 --- a/libraries/core_libs/network/include/network/tarcap/stats/packets_stats.hpp +++ b/libraries/core_libs/network/include/network/tarcap/stats/packets_stats.hpp @@ -1,9 +1,9 @@ #pragma once -#include +#include -#include "network/tarcap/stats/max_stats.hpp" -#include "network/tarcap/stats/packets_stats.hpp" +#include "max_stats.hpp" +#include "packets_stats.hpp" namespace taraxa::network::tarcap { diff --git a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp index 6dcf70c819..29bb6ecfb2 100644 --- a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp +++ b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp @@ -59,6 +59,9 @@ class TaraxaCapability final : public dev::p2p::CapabilityFace { */ static const InitPacketsHandlers kInitLatestVersionHandlers; + // TODO: remove this once we pass cornus hf + static const InitPacketsHandlers kInitV4Handlers; + public: TaraxaCapability(TarcapVersion version, const FullNodeConfig &conf, const h256 &genesis_hash, std::weak_ptr host, const dev::KeyPair &key, @@ -93,6 +96,7 @@ class TaraxaCapability final : public dev::p2p::CapabilityFace { private: bool filterSyncIrrelevantPackets(SubprotocolPacketType packet_type) const; + void 
handlePacketQueueOverLimit(std::shared_ptr host, dev::p2p::NodeID node_id, size_t tp_queue_size); private: // Capability version @@ -116,6 +120,12 @@ class TaraxaCapability final : public dev::p2p::CapabilityFace { // Main Threadpool for processing packets std::shared_ptr thread_pool_; + // Last disconnect time and number of peers + std::chrono::system_clock::time_point last_ddos_disconnect_time_ = {}; + std::chrono::system_clock::time_point queue_over_limit_start_time_ = {}; + bool queue_over_limit_ = false; + uint32_t last_disconnect_number_of_peers_ = 0; + LOG_OBJECTS_DEFINE }; diff --git a/libraries/core_libs/network/include/network/tarcap/taraxa_peer.hpp b/libraries/core_libs/network/include/network/tarcap/taraxa_peer.hpp index 08b009eefe..424e1b787b 100644 --- a/libraries/core_libs/network/include/network/tarcap/taraxa_peer.hpp +++ b/libraries/core_libs/network/include/network/tarcap/taraxa_peer.hpp @@ -5,6 +5,7 @@ #include #include +#include "common/types.hpp" #include "common/util.hpp" #include "network/tarcap/stats/packets_stats.hpp" diff --git a/libraries/core_libs/network/include/network/threadpool/packet_data.hpp b/libraries/core_libs/network/include/network/threadpool/packet_data.hpp index af521b17ac..19670ec716 100644 --- a/libraries/core_libs/network/include/network/threadpool/packet_data.hpp +++ b/libraries/core_libs/network/include/network/threadpool/packet_data.hpp @@ -1,10 +1,10 @@ #pragma once +#include #include #include -#include "json/value.h" #include "network/tarcap/packet_types.hpp" namespace taraxa::network::threadpool { @@ -40,6 +40,7 @@ class PacketData { PacketId id_{0}; // Unique packet id (counter) std::chrono::steady_clock::time_point receive_time_; SubprotocolPacketType type_; + // TODO: might not need anymore ??? 
std::string type_str_; PacketPriority priority_; dev::p2p::NodeID from_node_id_; diff --git a/libraries/core_libs/network/include/network/threadpool/packets_blocking_mask.hpp b/libraries/core_libs/network/include/network/threadpool/packets_blocking_mask.hpp index be5823cec5..10fce1493a 100644 --- a/libraries/core_libs/network/include/network/threadpool/packets_blocking_mask.hpp +++ b/libraries/core_libs/network/include/network/threadpool/packets_blocking_mask.hpp @@ -2,9 +2,7 @@ #include -#include #include -#include #include "network/tarcap/packet_types.hpp" #include "network/threadpool/packet_data.hpp" diff --git a/libraries/core_libs/network/include/network/threadpool/priority_queue.hpp b/libraries/core_libs/network/include/network/threadpool/priority_queue.hpp index 196b8407a7..aa18188e0a 100644 --- a/libraries/core_libs/network/include/network/threadpool/priority_queue.hpp +++ b/libraries/core_libs/network/include/network/threadpool/priority_queue.hpp @@ -6,7 +6,6 @@ #include #include "logger/logger.hpp" -#include "network/tarcap/packet_types.hpp" #include "network/tarcap/tarcap_version.hpp" #include "network/threadpool/packets_blocking_mask.hpp" #include "packets_queue.hpp" @@ -57,6 +56,20 @@ class PriorityQueue { */ size_t getPrirotityQueueSize(PacketData::PacketPriority priority) const; + /** + * @param packet_type + * @return true for non-blocking packet types, otherwise false + */ + bool isNonBlockingPacket(SubprotocolPacketType packet_type) const; + + /** + * @brief Updates packet blocking dependency + * @param packet + * @param unblock_processing if true, unblock packet processing, otherwise block processing + * @return true if blocking dependency for provided packet was updated, otherwise false + */ + bool updateBlockingDependencies(const PacketData& packet, bool unblock_processing = false); + private: /** * @brief Queue can borrow reserved thread from one of the other priority queues but each queue must have diff --git 
a/libraries/core_libs/network/include/network/ws_server.hpp b/libraries/core_libs/network/include/network/ws_server.hpp index e1290c8aef..c7224e79f1 100644 --- a/libraries/core_libs/network/include/network/ws_server.hpp +++ b/libraries/core_libs/network/include/network/ws_server.hpp @@ -16,6 +16,7 @@ #include #include +#include "common/thread_pool.hpp" #include "config/config.hpp" #include "dag/dag_block.hpp" #include "final_chain/data.hpp" @@ -50,7 +51,7 @@ class WsSession : public std::enable_shared_from_this { virtual std::string processRequest(const std::string_view& request) = 0; void newEthBlock(const ::taraxa::final_chain::BlockHeader& payload, const TransactionHashes& trx_hashes); - void newDagBlock(const DagBlock& blk); + void newDagBlock(const std::shared_ptr& blk); void newDagBlockFinalized(const blk_hash_t& blk, uint64_t period); void newPbftBlockExecuted(const Json::Value& payload); void newPendingTransaction(const trx_hash_t& trx_hash); @@ -84,7 +85,8 @@ class WsSession : public std::enable_shared_from_this { // Accepts incoming connections and launches the sessions class WsServer : public std::enable_shared_from_this, public jsonrpc::AbstractServerConnector { public: - WsServer(boost::asio::io_context& ioc, tcp::endpoint endpoint, addr_t node_addr); + WsServer(std::shared_ptr thread_pool, tcp::endpoint endpoint, addr_t node_addr, + uint32_t max_pending_tasks); virtual ~WsServer(); WsServer(const WsServer&) = delete; @@ -95,12 +97,14 @@ class WsServer : public std::enable_shared_from_this, public jsonrpc:: // Start accepting incoming connections void run(); void newEthBlock(const ::taraxa::final_chain::BlockHeader& payload, const TransactionHashes& trx_hashes); - void newDagBlock(const DagBlock& blk); + void newDagBlock(const std::shared_ptr& blk); void newDagBlockFinalized(const blk_hash_t& blk, uint64_t period); void newPbftBlockExecuted(const PbftBlock& sche_blk, const std::vector& finalized_dag_blk_hashes); void newPendingTransaction(const 
trx_hash_t& trx_hash); void newPillarBlockData(const pillar_chain::PillarBlockData& pillar_block_data); uint32_t numberOfSessions(); + uint32_t numberOfPendingTasks() const; + bool pendingTasksOverLimit() const { return numberOfPendingTasks() > kMaxPendingTasks; } virtual std::shared_ptr createSession(tcp::socket&& socket) = 0; @@ -118,6 +122,8 @@ class WsServer : public std::enable_shared_from_this, public jsonrpc:: boost::shared_mutex sessions_mtx_; protected: + std::weak_ptr thread_pool_; + uint32_t kMaxPendingTasks; const addr_t node_addr_; }; diff --git a/libraries/core_libs/network/rpc/Debug.cpp b/libraries/core_libs/network/rpc/Debug.cpp index 9f45d79e98..e62990b409 100644 --- a/libraries/core_libs/network/rpc/Debug.cpp +++ b/libraries/core_libs/network/rpc/Debug.cpp @@ -3,12 +3,10 @@ #include #include -#include - #include "common/jsoncpp.hpp" #include "final_chain/state_api_data.hpp" #include "network/rpc/eth/data.hpp" -#include "pbft/pbft_manager.hpp" +#include "transaction/transaction.hpp" using namespace std; using namespace dev; @@ -17,29 +15,12 @@ using namespace taraxa; namespace taraxa::net { -inline EthBlockNumber get_ctx_block_num(EthBlockNumber block_number) { - return (block_number >= 1) ? 
block_number - 1 : 0; -} - -Json::Value Debug::debug_traceTransaction(const std::string& transaction_hash) { - Json::Value res; - auto [trx, loc] = get_transaction_with_location(transaction_hash); - if (!trx || !loc) { - throw std::runtime_error("Transaction not found"); - } - if (auto node = full_node_.lock()) { - return util::readJsonFromString( - node->getFinalChain()->trace({to_eth_trx(std::move(trx))}, get_ctx_block_num(loc->period))); - } - return res; -} - Json::Value Debug::debug_traceCall(const Json::Value& call_params, const std::string& blk_num) { Json::Value res; const auto block = parse_blk_num(blk_num); auto trx = to_eth_trx(call_params, block); if (auto node = full_node_.lock()) { - return util::readJsonFromString(node->getFinalChain()->trace({std::move(trx)}, block)); + return util::readJsonFromString(node->getFinalChain()->trace({}, {std::move(trx)}, block)); } return res; } @@ -51,7 +32,34 @@ Json::Value Debug::trace_call(const Json::Value& call_params, const Json::Value& auto params = parse_tracking_parms(trace_params); if (auto node = full_node_.lock()) { return util::readJsonFromString( - node->getFinalChain()->trace({to_eth_trx(call_params, block)}, block, std::move(params))); + node->getFinalChain()->trace({}, {to_eth_trx(call_params, block)}, block, std::move(params))); + } + return res; +} + +std::tuple, state_api::EVMTransaction, uint64_t> +Debug::get_transaction_with_state(const std::string& transaction_hash) { + auto node = full_node_.lock(); + if (!node) { + return {}; + } + const auto hash = jsToFixed<32>(transaction_hash); + + auto loc = node->getFinalChain()->transactionLocation(hash); + if (!loc) { + throw std::runtime_error("Transaction not found"); + } + auto block_transactions = node->getFinalChain()->getTransactions(loc->period); + + auto state_trxs = SharedTransactions(block_transactions.begin(), block_transactions.begin() + loc->position); + + return {to_eth_trxs(state_trxs), to_eth_trx(block_transactions[loc->position]), 
loc->period}; +} +Json::Value Debug::debug_traceTransaction(const std::string& transaction_hash) { + Json::Value res; + auto [state_trxs, trx, period] = get_transaction_with_state(transaction_hash); + if (auto node = full_node_.lock()) { + return util::readJsonFromString(node->getFinalChain()->trace({}, {trx}, period)); } return res; } @@ -59,13 +67,9 @@ Json::Value Debug::trace_call(const Json::Value& call_params, const Json::Value& Json::Value Debug::trace_replayTransaction(const std::string& transaction_hash, const Json::Value& trace_params) { Json::Value res; auto params = parse_tracking_parms(trace_params); - auto [trx, loc] = get_transaction_with_location(transaction_hash); - if (!trx || !loc) { - throw std::runtime_error("Transaction not found"); - } + auto [state_trxs, trx, period] = get_transaction_with_state(transaction_hash); if (auto node = full_node_.lock()) { - return util::readJsonFromString( - node->getFinalChain()->trace({to_eth_trx(std::move(trx))}, get_ctx_block_num(loc->period), std::move(params))); + return util::readJsonFromString(node->getFinalChain()->trace(state_trxs, {trx}, period, params)); } return res; } @@ -79,12 +83,8 @@ Json::Value Debug::trace_replayBlockTransactions(const std::string& block_num, c if (!transactions.has_value() || transactions->empty()) { return Json::Value(Json::arrayValue); } - std::vector trxs; - trxs.reserve(transactions->size()); - std::transform(transactions->begin(), transactions->end(), std::back_inserter(trxs), - [this](auto t) { return to_eth_trx(std::move(t)); }); - return util::readJsonFromString( - node->getFinalChain()->trace(std::move(trxs), get_ctx_block_num(block), std::move(params))); + std::vector trxs = to_eth_trxs(*transactions); + return util::readJsonFromString(node->getFinalChain()->trace({}, std::move(trxs), block, std::move(params))); } return res; } @@ -129,7 +129,7 @@ Json::Value Debug::debug_getPeriodTransactionsWithReceipts(const std::string& _p } auto final_chain = 
node->getFinalChain(); auto period = dev::jsToInt(_period); - auto block_hash = final_chain->block_hash(period); + auto block_hash = final_chain->blockHash(period); auto trxs = node->getDB()->getPeriodTransactions(period); if (!trxs.has_value()) { return Json::Value(Json::arrayValue); @@ -137,9 +137,9 @@ Json::Value Debug::debug_getPeriodTransactionsWithReceipts(const std::string& _p return transformToJsonParallel(*trxs, [&final_chain, &block_hash](const auto& trx) { auto hash = trx->getHash(); - auto r = final_chain->transaction_receipt(hash); + auto r = final_chain->transactionReceipt(hash); auto location = - rpc::eth::ExtendedTransactionLocation{{*final_chain->transaction_location(hash), *block_hash}, hash}; + rpc::eth::ExtendedTransactionLocation{{*final_chain->transactionLocation(hash), *block_hash}, hash}; auto transaction = rpc::eth::LocalisedTransaction{trx, location}; auto receipt = rpc::eth::LocalisedTransactionReceipt{*r, location, trx->getSender(), trx->getReceiver()}; auto receipt_json = rpc::eth::toJson(receipt); @@ -191,7 +191,7 @@ Json::Value Debug::debug_getPreviousBlockCertVotes(const std::string& _period) { } const auto votes_period = votes.front()->getPeriod(); - const uint64_t total_dpos_votes_count = final_chain->dpos_eligible_total_vote_count(votes_period - 1); + const uint64_t total_dpos_votes_count = final_chain->dposEligibleTotalVoteCount(votes_period - 1); res["total_votes_count"] = total_dpos_votes_count; res["votes"] = transformToJsonParallel(votes, [&](const auto& vote) { vote_manager->validateVote(vote); @@ -214,7 +214,7 @@ Json::Value Debug::debug_dposValidatorTotalStakes(const std::string& _period) { auto vote_manager = node->getVoteManager(); auto period = dev::jsToInt(_period); - auto validatorsStakes = final_chain->dpos_validators_total_stakes(period); + auto validatorsStakes = final_chain->dposValidatorsTotalStakes(period); Json::Value res(Json::arrayValue); @@ -240,7 +240,7 @@ Json::Value 
Debug::debug_dposTotalAmountDelegated(const std::string& _period) { auto final_chain = node->getFinalChain(); auto period = dev::jsToInt(_period); - auto totalAmountDelegated = final_chain->dpos_total_amount_delegated(period); + auto totalAmountDelegated = final_chain->dposTotalAmountDelegated(period); return toJS(totalAmountDelegated); } catch (...) { @@ -262,6 +262,13 @@ state_api::Tracing Debug::parse_tracking_parms(const Json::Value& json) const { return ret; } +std::vector Debug::to_eth_trxs(const std::vector>& trxs) { + std::vector eth_trxs; + eth_trxs.reserve(trxs.size()); + std::transform(trxs.begin(), trxs.end(), std::back_inserter(eth_trxs), + [this](auto t) { return to_eth_trx(std::move(t)); }); + return eth_trxs; +} state_api::EVMTransaction Debug::to_eth_trx(std::shared_ptr t) const { return state_api::EVMTransaction{ t->getSender(), t->getGasPrice(), t->getReceiver(), t->getNonce(), t->getValue(), t->getGas(), t->getData(), @@ -310,7 +317,7 @@ state_api::EVMTransaction Debug::to_eth_trx(const Json::Value& json, EthBlockNum trx.nonce = jsToU256(json["nonce"].asString()); } else { if (auto node = full_node_.lock()) { - trx.nonce = node->getFinalChain()->get_account(trx.from, blk_num).value_or(state_api::ZeroAccount).nonce; + trx.nonce = node->getFinalChain()->getAccount(trx.from, blk_num).value_or(state_api::ZeroAccount).nonce; } } @@ -320,7 +327,7 @@ state_api::EVMTransaction Debug::to_eth_trx(const Json::Value& json, EthBlockNum EthBlockNumber Debug::parse_blk_num(const string& blk_num_str) { if (blk_num_str == "latest" || blk_num_str == "pending" || blk_num_str.empty()) { if (auto node = full_node_.lock()) { - return node->getFinalChain()->last_block_number(); + return node->getFinalChain()->lastBlockNumber(); } } else if (blk_num_str == "earliest") { return 0; @@ -338,13 +345,4 @@ Address Debug::to_address(const string& s) const { throw InvalidAddress(); } -std::pair, std::optional> -Debug::get_transaction_with_location(const std::string& 
transaction_hash) const { - if (auto node = full_node_.lock()) { - const auto hash = jsToFixed<32>(transaction_hash); - return {node->getDB()->getTransaction(hash), node->getFinalChain()->transaction_location(hash)}; - } - return {}; -} - } // namespace taraxa::net \ No newline at end of file diff --git a/libraries/core_libs/network/rpc/Debug.h b/libraries/core_libs/network/rpc/Debug.h index 7d897e7fe5..29cfc1d2ac 100644 --- a/libraries/core_libs/network/rpc/Debug.h +++ b/libraries/core_libs/network/rpc/Debug.h @@ -47,11 +47,12 @@ class Debug : public DebugFace { private: state_api::EVMTransaction to_eth_trx(std::shared_ptr t) const; state_api::EVMTransaction to_eth_trx(const Json::Value& json, EthBlockNumber blk_num); + std::vector to_eth_trxs(const std::vector>& trxs); EthBlockNumber parse_blk_num(const string& blk_num_str); state_api::Tracing parse_tracking_parms(const Json::Value& json) const; Address to_address(const string& s) const; - std::pair, std::optional> - get_transaction_with_location(const std::string& transaction_hash) const; + std::tuple, state_api::EVMTransaction, uint64_t> get_transaction_with_state( + const std::string& transaction_hash); std::weak_ptr full_node_; const uint64_t kGasLimit = ((uint64_t)1 << 53) - 1; diff --git a/libraries/core_libs/network/rpc/Taraxa.cpp b/libraries/core_libs/network/rpc/Taraxa.cpp index 53198f04a1..92aaef506a 100644 --- a/libraries/core_libs/network/rpc/Taraxa.cpp +++ b/libraries/core_libs/network/rpc/Taraxa.cpp @@ -1,12 +1,13 @@ #include "Taraxa.h" +#include #include #include #include #include +#include "config/version.hpp" #include "dag/dag_manager.hpp" -#include "json/reader.h" #include "pbft/pbft_manager.hpp" #include "transaction/transaction_manager.hpp" @@ -110,6 +111,58 @@ Json::Value Taraxa::taraxa_getScheduleBlockByPeriod(const std::string& _period) } } +Json::Value Taraxa::taraxa_getNodeVersions() { + try { + Json::Value res; + auto node = tryGetNode(); + auto db = node->getDB(); + auto period = 
node->getFinalChain()->lastBlockNumber(); + const uint64_t max_blocks_to_process = 6000; + std::map node_version_map; + std::multimap> version_node_map; + std::map> version_count; + for (uint64_t i = period; i > 0 && period - i < max_blocks_to_process; i--) { + auto blk = db->getPbftBlock(i); + if (!blk.has_value()) { + break; + } + if (!node_version_map.contains(blk->getBeneficiary())) { + node_version_map[blk->getBeneficiary()] = blk->getExtraData()->getJson()["major_version"].asString() + "." + + blk->getExtraData()->getJson()["minor_version"].asString() + "." + + blk->getExtraData()->getJson()["patch_version"].asString(); + } + } + + auto total_vote_count = node->getFinalChain()->dposEligibleTotalVoteCount(period); + for (auto nv : node_version_map) { + auto vote_count = node->getFinalChain()->dposEligibleVoteCount(period, nv.first); + version_node_map.insert({nv.second, {nv.first, vote_count}}); + version_count[nv.second].first++; + version_count[nv.second].second += vote_count; + } + + res["nodes"] = Json::Value(Json::arrayValue); + for (auto vn : version_node_map) { + Json::Value node_json; + node_json["node"] = vn.second.first.toString(); + node_json["version"] = vn.first; + node_json["vote_count"] = vn.second.second; + res["nodes"].append(node_json); + } + res["versions"] = Json::Value(Json::arrayValue); + for (auto vc : version_count) { + Json::Value version_json; + version_json["version"] = vc.first; + version_json["node_count"] = vc.second.first; + version_json["vote_percentage"] = vc.second.second * 100 / total_vote_count; + res["versions"].append(version_json); + } + return res; + } catch (...) 
{ + BOOST_THROW_EXCEPTION(JsonRpcException(Errors::ERROR_RPC_INVALID_PARAMS)); + } +} + Json::Value Taraxa::taraxa_getDagBlockByLevel(const string& _blockLevel, bool _includeTransactions) { try { auto node = tryGetNode(); @@ -142,7 +195,7 @@ Json::Value Taraxa::taraxa_getConfig() { return enc_json(tryGetNode()->getConfig Json::Value Taraxa::taraxa_getChainStats() { Json::Value res; if (auto node = full_node_.lock()) { - res["pbft_period"] = Json::UInt64(node->getFinalChain()->last_block_number()); + res["pbft_period"] = Json::UInt64(node->getFinalChain()->lastBlockNumber()); res["dag_blocks_executed"] = Json::UInt64(node->getDB()->getNumBlockExecuted()); res["transactions_executed"] = Json::UInt64(node->getDB()->getNumTransactionExecuted()); } @@ -158,7 +211,7 @@ std::string Taraxa::taraxa_yield(const std::string& _period) { } auto period = dev::jsToInt(_period); - return toJS(node->getFinalChain()->dpos_yield(period)); + return toJS(node->getFinalChain()->dposYield(period)); } catch (...) { BOOST_THROW_EXCEPTION(JsonRpcException(Errors::ERROR_RPC_INVALID_PARAMS)); } @@ -172,7 +225,7 @@ std::string Taraxa::taraxa_totalSupply(const std::string& _period) { } auto period = dev::jsToInt(_period); - return toJS(node->getFinalChain()->dpos_total_supply(period)); + return toJS(node->getFinalChain()->dposTotalSupply(period)); } catch (...) 
{ BOOST_THROW_EXCEPTION(JsonRpcException(Errors::ERROR_RPC_INVALID_PARAMS)); } diff --git a/libraries/core_libs/network/rpc/Taraxa.h b/libraries/core_libs/network/rpc/Taraxa.h index 52fc9d8b25..4e5e7c2f3f 100644 --- a/libraries/core_libs/network/rpc/Taraxa.h +++ b/libraries/core_libs/network/rpc/Taraxa.h @@ -26,6 +26,7 @@ class Taraxa : public TaraxaFace { virtual std::string taraxa_dagBlockLevel() override; virtual std::string taraxa_dagBlockPeriod() override; virtual Json::Value taraxa_getScheduleBlockByPeriod(const std::string& _period) override; + virtual Json::Value taraxa_getNodeVersions() override; virtual std::string taraxa_pbftBlockHashByPeriod(const std::string& _period) override; virtual Json::Value taraxa_getConfig() override; virtual Json::Value taraxa_getChainStats() override; diff --git a/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json b/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json index ca7576933d..255da4cd2b 100644 --- a/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json +++ b/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json @@ -47,6 +47,12 @@ "order": [], "returns": {} }, + { + "name": "taraxa_getNodeVersions", + "params": [], + "order": [], + "returns": {} + }, { "name": "taraxa_getConfig", "params": [], diff --git a/libraries/core_libs/network/rpc/TaraxaClient.h b/libraries/core_libs/network/rpc/TaraxaClient.h index e0b318774c..49a6a978d7 100644 --- a/libraries/core_libs/network/rpc/TaraxaClient.h +++ b/libraries/core_libs/network/rpc/TaraxaClient.h @@ -79,6 +79,15 @@ class TaraxaClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } + Json::Value taraxa_getNodeVersions() throw(jsonrpc::JsonRpcException) { + Json::Value p; + p = Json::nullValue; + Json::Value result = this->CallMethod("taraxa_getNodeVersions", p); + if (result.isObject()) + return result; + else + throw 
jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); + } Json::Value taraxa_getConfig() throw(jsonrpc::JsonRpcException) { Json::Value p; p = Json::nullValue; diff --git a/libraries/core_libs/network/rpc/TaraxaFace.h b/libraries/core_libs/network/rpc/TaraxaFace.h index 797e6af5f0..4db4e97472 100644 --- a/libraries/core_libs/network/rpc/TaraxaFace.h +++ b/libraries/core_libs/network/rpc/TaraxaFace.h @@ -37,6 +37,9 @@ class TaraxaFace : public ServerInterface { this->bindAndAddMethod(jsonrpc::Procedure("taraxa_getScheduleBlockByPeriod", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), &taraxa::net::TaraxaFace::taraxa_getScheduleBlockByPeriodI); + this->bindAndAddMethod( + jsonrpc::Procedure("taraxa_getNodeVersions", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL), + &taraxa::net::TaraxaFace::taraxa_getNodeVersionsI); this->bindAndAddMethod( jsonrpc::Procedure("taraxa_getConfig", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL), &taraxa::net::TaraxaFace::taraxa_getConfigI); @@ -83,6 +86,10 @@ class TaraxaFace : public ServerInterface { inline virtual void taraxa_getScheduleBlockByPeriodI(const Json::Value &request, Json::Value &response) { response = this->taraxa_getScheduleBlockByPeriod(request[0u].asString()); } + inline virtual void taraxa_getNodeVersionsI(const Json::Value &request, Json::Value &response) { + (void)request; + response = this->taraxa_getNodeVersions(); + } inline virtual void taraxa_getConfigI(const Json::Value &request, Json::Value &response) { (void)request; response = this->taraxa_getConfig(); @@ -114,6 +121,7 @@ class TaraxaFace : public ServerInterface { virtual std::string taraxa_dagBlockLevel() = 0; virtual std::string taraxa_dagBlockPeriod() = 0; virtual Json::Value taraxa_getScheduleBlockByPeriod(const std::string ¶m1) = 0; + virtual Json::Value taraxa_getNodeVersions() = 0; virtual Json::Value taraxa_getConfig() = 0; virtual 
Json::Value taraxa_getChainStats() = 0; virtual std::string taraxa_pbftBlockHashByPeriod(const std::string ¶m1) = 0; diff --git a/libraries/core_libs/network/rpc/Test.cpp b/libraries/core_libs/network/rpc/Test.cpp index 6f69dc1445..4f243f26c2 100644 --- a/libraries/core_libs/network/rpc/Test.cpp +++ b/libraries/core_libs/network/rpc/Test.cpp @@ -38,7 +38,7 @@ Json::Value Test::send_coin_transaction(const Json::Value ¶m1) { secret_t sk = secret_t(param1["secret"].asString()); uint64_t nonce = 0; if (!param1["nonce"]) { - auto acc = node->getFinalChain()->get_account(toAddress(sk)); + auto acc = node->getFinalChain()->getAccount(toAddress(sk)); nonce = acc->nonce.convert_to() + 1; } else { nonce = dev::jsToInt(param1["nonce"].asString()); diff --git a/libraries/core_libs/network/rpc/eth/Eth.cpp b/libraries/core_libs/network/rpc/eth/Eth.cpp index 7fcbdc5bc8..e5f57c2be9 100644 --- a/libraries/core_libs/network/rpc/eth/Eth.cpp +++ b/libraries/core_libs/network/rpc/eth/Eth.cpp @@ -49,7 +49,7 @@ Json::Value toJson(const LocalisedTransaction& lt) { return toJson(*lt.trx, lt.t Json::Value toJson(const BlockHeader& obj) { Json::Value res(Json::objectValue); res["parentHash"] = toJS(obj.parent_hash); - res["sha3Uncles"] = toJS(BlockHeader::uncles_hash()); + res["sha3Uncles"] = toJS(BlockHeader::unclesHash()); res["stateRoot"] = toJS(obj.state_root); res["transactionsRoot"] = toJS(obj.transactions_root); res["receiptsRoot"] = toJS(obj.receipts_root); @@ -61,7 +61,7 @@ Json::Value toJson(const BlockHeader& obj) { res["timestamp"] = toJS(obj.timestamp); res["author"] = toJS(obj.author); res["miner"] = toJS(obj.author); - res["mixHash"] = toJS(BlockHeader::mix_hash()); + res["mixHash"] = toJS(BlockHeader::mixHash()); res["nonce"] = toJS(BlockHeader::nonce()); res["uncles"] = Json::Value(Json::arrayValue); res["hash"] = toJS(obj.hash); @@ -128,27 +128,27 @@ class EthImpl : public Eth, EthParams { Json::Value eth_accounts() override { return toJsonArray(vector{address}); } - string 
eth_blockNumber() override { return toJS(final_chain->last_block_number()); } + string eth_blockNumber() override { return toJS(final_chain->lastBlockNumber()); } string eth_getBalance(const string& _address, const Json::Value& _json) override { const auto block_number = get_block_number_from_json(_json); - return toJS(final_chain->get_account(toAddress(_address), block_number).value_or(ZeroAccount).balance); + return toJS(final_chain->getAccount(toAddress(_address), block_number).value_or(ZeroAccount).balance); } string eth_getStorageAt(const string& _address, const string& _position, const Json::Value& _json) override { const auto block_number = get_block_number_from_json(_json); - return toJS(final_chain->get_account_storage(toAddress(_address), jsToU256(_position), block_number)); + return toJS(final_chain->getAccountStorage(toAddress(_address), jsToU256(_position), block_number)); } string eth_getStorageRoot(const string& _address, const string& _blockNumber) override { - return toJS(final_chain->get_account(toAddress(_address), parse_blk_num(_blockNumber)) + return toJS(final_chain->getAccount(toAddress(_address), parse_blk_num(_blockNumber)) .value_or(ZeroAccount) .storage_root_eth()); } string eth_getCode(const string& _address, const Json::Value& _json) override { const auto block_number = get_block_number_from_json(_json); - return toJS(final_chain->get_code(toAddress(_address), block_number)); + return toJS(final_chain->getCode(toAddress(_address), block_number)); } string eth_call(const Json::Value& _json, const Json::Value& _jsonBlock) override { @@ -168,7 +168,7 @@ class EthImpl : public Eth, EthParams { if (!blockNumber.empty()) { blk_n = parse_blk_num(blockNumber); } else { - blk_n = final_chain->last_block_number(); + blk_n = final_chain->lastBlockNumber(); } prepare_transaction_for_call(t, blk_n); @@ -230,7 +230,7 @@ class EthImpl : public Eth, EthParams { } Json::Value eth_getBlockByHash(const string& _blockHash, bool _includeTransactions) 
override { - if (auto blk_n = final_chain->block_number(jsToFixed<32>(_blockHash)); blk_n) { + if (auto blk_n = final_chain->blockNumber(jsToFixed<32>(_blockHash)); blk_n) { return get_block_by_number(*blk_n, _includeTransactions); } return Json::Value(); @@ -314,7 +314,7 @@ class EthImpl : public Eth, EthParams { void note_pending_transaction(const h256& trx_hash) override { watches_.new_transactions_.process_update(trx_hash); } Json::Value get_block_by_number(EthBlockNumber blk_n, bool include_transactions) { - auto blk_header = final_chain->block_header(blk_n); + auto blk_header = final_chain->blockHeader(blk_n); if (!blk_header) { return Json::Value(); } @@ -329,7 +329,7 @@ class EthImpl : public Eth, EthParams { ++loc.position; } } else { - auto hashes = final_chain->transaction_hashes(blk_n); + auto hashes = final_chain->transactionHashes(blk_n); trxs_json = toJsonArray(*hashes); } return ret; @@ -340,12 +340,12 @@ class EthImpl : public Eth, EthParams { if (!trx) { return {}; } - auto loc = final_chain->transaction_location(h); + auto loc = final_chain->transactionLocation(h); return LocalisedTransaction{ trx, TransactionLocationWithBlockHash{ *loc, - *final_chain->block_hash(loc->period), + *final_chain->blockHash(loc->period), }, }; } @@ -359,18 +359,18 @@ class EthImpl : public Eth, EthParams { trxs[trx_pos], TransactionLocationWithBlockHash{ {blk_n, trx_pos}, - *final_chain->block_hash(blk_n), + *final_chain->blockHash(blk_n), }, }; } optional get_transaction(const h256& blk_h, uint64_t _i) const { - auto blk_n = final_chain->block_number(blk_h); + auto blk_n = final_chain->blockNumber(blk_h); return blk_n ? 
get_transaction(_i, *blk_n) : nullopt; } optional get_transaction_receipt(const h256& trx_h) const { - auto r = final_chain->transaction_receipt(trx_h); + auto r = final_chain->transactionReceipt(trx_h); if (!r) { return {}; } @@ -385,12 +385,12 @@ class EthImpl : public Eth, EthParams { } uint64_t transactionCount(const h256& block_hash) const { - auto n = final_chain->block_number(block_hash); + auto n = final_chain->blockNumber(block_hash); return n ? final_chain->transactionCount(n) : 0; } trx_nonce_t transaction_count(EthBlockNumber n, const Address& addr) { - return final_chain->get_account(addr, n).value_or(ZeroAccount).nonce; + return final_chain->getAccount(addr, n).value_or(ZeroAccount).nonce; } state_api::ExecutionResult call(EthBlockNumber blk_n, const TransactionSkeleton& trx) { @@ -479,7 +479,7 @@ class EthImpl : public Eth, EthParams { EthBlockNumber parse_blk_num(const string& blk_num_str) { auto ret = parse_blk_num_specific(blk_num_str); - return ret ? *ret : final_chain->last_block_number(); + return ret ? 
*ret : final_chain->lastBlockNumber(); } EthBlockNumber get_block_number_from_json(const Json::Value& json) { @@ -488,7 +488,7 @@ class EthImpl : public Eth, EthParams { return parse_blk_num(json["blockNumber"].asString()); } if (!json["blockHash"].empty()) { - if (auto ret = final_chain->block_number(jsToFixed<32>(json["blockHash"].asString()))) { + if (auto ret = final_chain->blockNumber(jsToFixed<32>(json["blockHash"].asString()))) { return *ret; } throw std::runtime_error("Resource not found"); @@ -505,7 +505,7 @@ class EthImpl : public Eth, EthParams { if (const auto& fromBlock = json["fromBlock"]; !fromBlock.empty()) { from_block = parse_blk_num(fromBlock.asString()); } else { - from_block = final_chain->last_block_number(); + from_block = final_chain->lastBlockNumber(); } if (const auto& toBlock = json["toBlock"]; !toBlock.empty()) { to_block = parse_blk_num_specific(toBlock.asString()); diff --git a/libraries/core_libs/network/rpc/eth/Eth.h b/libraries/core_libs/network/rpc/eth/Eth.h index c36285bf82..262f0af34a 100644 --- a/libraries/core_libs/network/rpc/eth/Eth.h +++ b/libraries/core_libs/network/rpc/eth/Eth.h @@ -11,7 +11,7 @@ struct EthParams { Address address; uint64_t chain_id = 0; uint64_t gas_limit = ((uint64_t)1 << 53) - 1; - std::shared_ptr final_chain; + std::shared_ptr final_chain; std::function(const h256&)> get_trx; std::function& trx)> send_trx; std::function gas_pricer = [] { return u256(0); }; diff --git a/libraries/core_libs/network/rpc/eth/LogFilter.cpp b/libraries/core_libs/network/rpc/eth/LogFilter.cpp index 7723998bcf..3042b0d618 100644 --- a/libraries/core_libs/network/rpc/eth/LogFilter.cpp +++ b/libraries/core_libs/network/rpc/eth/LogFilter.cpp @@ -119,25 +119,25 @@ void LogFilter::match_one(const ExtendedTransactionLocation& trx_loc, const Tran } } -std::vector LogFilter::match_all(const FinalChain& final_chain) const { +std::vector LogFilter::match_all(const final_chain::FinalChain& final_chain) const { std::vector ret; - // 
to_block can't be greater than the last executed block number - const auto last_block_number = final_chain.last_block_number(); - auto to_blk_n = to_block_ ? *to_block_ : last_block_number; - if (to_blk_n > last_block_number) { - to_blk_n = last_block_number; - } - auto action = [&, this](EthBlockNumber blk_n) { - ExtendedTransactionLocation trx_loc{{{blk_n}, final_chain.block_hash(blk_n).value()}}; - auto hashes = final_chain.transaction_hashes(trx_loc.period); + ExtendedTransactionLocation trx_loc{{{blk_n}, *final_chain.blockHash(blk_n)}}; + auto hashes = final_chain.transactionHashes(trx_loc.period); for (const auto& hash : *hashes) { trx_loc.trx_hash = hash; - match_one(trx_loc, final_chain.transaction_receipt(hash).value(), [&](const auto& lle) { ret.push_back(lle); }); + match_one(trx_loc, *final_chain.transactionReceipt(hash), [&](const auto& lle) { ret.push_back(lle); }); ++trx_loc.position; } }; + // to_block can't be greater than the last executed block number + const auto last_block_number = final_chain.lastBlockNumber(); + auto to_blk_n = to_block_ ? 
*to_block_ : last_block_number; + if (to_blk_n > last_block_number) { + to_blk_n = last_block_number; + } + if (is_range_only_) { for (auto blk_n = from_block_; blk_n <= to_blk_n; ++blk_n) { action(blk_n); diff --git a/libraries/core_libs/network/rpc/eth/LogFilter.hpp b/libraries/core_libs/network/rpc/eth/LogFilter.hpp index 830717c319..08232c7a0f 100644 --- a/libraries/core_libs/network/rpc/eth/LogFilter.hpp +++ b/libraries/core_libs/network/rpc/eth/LogFilter.hpp @@ -27,7 +27,7 @@ struct LogFilter { bool blk_number_matches(EthBlockNumber blk_n) const; void match_one(ExtendedTransactionLocation const& trx_loc, TransactionReceipt const& r, std::function const& cb) const; - std::vector match_all(FinalChain const& final_chain) const; + std::vector match_all(final_chain::FinalChain const& final_chain) const; }; } // namespace taraxa::net::rpc::eth \ No newline at end of file diff --git a/libraries/core_libs/network/src/http_server.cpp b/libraries/core_libs/network/src/http_server.cpp index 7b681ac3fb..e89cdc9a72 100644 --- a/libraries/core_libs/network/src/http_server.cpp +++ b/libraries/core_libs/network/src/http_server.cpp @@ -2,9 +2,15 @@ namespace taraxa::net { -HttpServer::HttpServer(boost::asio::io_context &io, boost::asio::ip::tcp::endpoint ep, const addr_t &node_addr, - const std::shared_ptr &request_processor) - : request_processor_(request_processor), io_context_(io), acceptor_(io), ep_(std::move(ep)) { +HttpServer::HttpServer(std::shared_ptr thread_pool, boost::asio::ip::tcp::endpoint ep, + const addr_t &node_addr, const std::shared_ptr &request_processor, + uint32_t max_pending_tasks) + : request_processor_(request_processor), + io_context_(thread_pool->unsafe_get_io_context()), + acceptor_(thread_pool->unsafe_get_io_context()), + ep_(std::move(ep)), + thread_pool_(thread_pool), + kMaxPendingTasks(max_pending_tasks) { LOG_OBJECTS_CREATE("HTTP"); LOG(log_si_) << "Taraxa HttpServer started at port: " << ep_.port(); } @@ -66,6 +72,14 @@ bool HttpServer::stop() 
{ return true; } +uint32_t HttpServer::numberOfPendingTasks() const { + auto thread_pool = thread_pool_.lock(); + if (thread_pool) { + return thread_pool->num_pending_tasks(); + } + return 0; +} + std::shared_ptr HttpConnection::getShared() { try { return shared_from_this(); @@ -98,10 +112,17 @@ void HttpConnection::read() { } else { assert(server_->request_processor_); LOG(server_->log_dg_) << "Received: " << request_; - response_ = server_->request_processor_->process(request_); - boost::beast::http::async_write( - socket_, response_, - [this_sp = getShared()](auto const & /*ec*/, auto /*bytes_transfered*/) { this_sp->stop(); }); + + if (server_->pendingTasksOverLimit()) { + LOG(server_->log_er_) << "HttpConnection closed - pending tasks over the limit " + << server_->numberOfPendingTasks(); + stop(); + } else { + response_ = server_->request_processor_->process(request_); + boost::beast::http::async_write( + socket_, response_, + [this_sp = getShared()](auto const & /*ec*/, auto /*bytes_transferred*/) { this_sp->stop(); }); + } } }); } diff --git a/libraries/core_libs/network/src/network.cpp b/libraries/core_libs/network/src/network.cpp index 9091c68571..5d991f768f 100644 --- a/libraries/core_libs/network/src/network.cpp +++ b/libraries/core_libs/network/src/network.cpp @@ -5,7 +5,6 @@ #include #include -#include #include "config/version.hpp" #include "network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp" @@ -16,6 +15,14 @@ #include "network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/dag_block_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/get_pillar_votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp" +#include 
"network/tarcap/packets_handlers/v3/pillar_vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/status_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/transaction_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/votes_bundle_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "network/tarcap/stats/node_stats.hpp" #include "network/tarcap/stats/time_period_packets_stats.hpp" @@ -59,6 +66,7 @@ Network::Network(const FullNodeConfig &config, const h256 &genesis_hash, std::fi net_conf.traverseNAT = false; net_conf.publicIPAddress = config.network.public_ip; net_conf.pin = false; + net_conf.trustedNodes = config.network.trusted_nodes; dev::p2p::TaraxaNetworkConfig taraxa_net_conf; taraxa_net_conf.ideal_peer_count = config.network.ideal_peer_count; @@ -74,15 +82,16 @@ Network::Network(const FullNodeConfig &config, const h256 &genesis_hash, std::fi dev::p2p::Host::CapabilitiesFactory constructCapabilities = [&](std::weak_ptr host) { assert(!host.expired()); - assert(kV2NetworkVersion < TARAXA_NET_VERSION); + assert(kV3NetworkVersion < TARAXA_NET_VERSION); dev::p2p::Host::CapabilityList capabilities; - // Register old version (V2) of taraxa capability - auto v2_tarcap = std::make_shared( - kV2NetworkVersion, config, genesis_hash, host, key, packets_tp_, all_packets_stats_, pbft_syncing_state_, db, - pbft_mgr, pbft_chain, vote_mgr, dag_mgr, trx_mgr, slashing_manager, pillar_chain_mgr); - capabilities.emplace_back(v2_tarcap); + // Register old version (V4) of taraxa capability + auto v3_tarcap = std::make_shared( + kV3NetworkVersion, config, genesis_hash, host, key, packets_tp_, all_packets_stats_, pbft_syncing_state_, db, + pbft_mgr, pbft_chain, vote_mgr, dag_mgr, trx_mgr, slashing_manager, pillar_chain_mgr, + network::tarcap::TaraxaCapability::kInitV4Handlers); + capabilities.emplace_back(v3_tarcap); // Register 
latest version of taraxa capability auto latest_tarcap = std::make_shared( @@ -132,6 +141,12 @@ void Network::start() { bool Network::isStarted() { return tp_.is_running(); } +bool Network::packetQueueOverLimit() const { + auto [hp_queue_size, mp_queue_size, lp_queue_size] = packets_tp_->getQueueSize(); + auto total_size = hp_queue_size + mp_queue_size + lp_queue_size; + return total_size > kConf.network.ddos_protection.max_packets_queue_size; +} + std::list Network::getAllNodes() const { return host_->getNodes(); } size_t Network::getPeerCount() { return host_->peer_count(); } @@ -176,8 +191,14 @@ void Network::registerPeriodicEvents(const std::shared_ptr &pbft_mg // Send new transactions auto sendTxs = [this, trx_mgr = trx_mgr]() { for (auto &tarcap : tarcaps_) { - auto tx_packet_handler = tarcap.second->getSpecificHandler(); - tx_packet_handler->periodicSendTransactions(trx_mgr->getAllPoolTrxs()); + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + auto tx_packet_handler = tarcap.second->getSpecificHandler(); + tx_packet_handler->periodicSendTransactions(trx_mgr->getAllPoolTrxs()); + } else { + auto tx_packet_handler = tarcap.second->getSpecificHandler(); + tx_packet_handler->periodicSendTransactions(trx_mgr->getAllPoolTrxs()); + } } }; periodic_events_tp_.post_loop({kConf.network.transaction_interval_ms}, sendTxs); @@ -185,8 +206,14 @@ void Network::registerPeriodicEvents(const std::shared_ptr &pbft_mg // Send status packet auto sendStatus = [this]() { for (auto &tarcap : tarcaps_) { - auto status_packet_handler = tarcap.second->getSpecificHandler(); - status_packet_handler->sendStatusToPeers(); + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + auto status_packet_handler = tarcap.second->getSpecificHandler(); + status_packet_handler->sendStatusToPeers(); + } else { + auto status_packet_handler = tarcap.second->getSpecificHandler(); + status_packet_handler->sendStatusToPeers(); + } } }; const auto send_status_interval = 
6 * lambda_ms; @@ -283,30 +310,54 @@ void Network::addBootNodes(bool initial) { } } -void Network::gossipDagBlock(const DagBlock &block, bool proposed, const SharedTransactions &trxs) { +void Network::gossipDagBlock(const std::shared_ptr &block, bool proposed, const SharedTransactions &trxs) { for (const auto &tarcap : tarcaps_) { - tarcap.second->getSpecificHandler()->onNewBlockVerified(block, proposed, - trxs); + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + tarcap.second->getSpecificHandler()->onNewBlockVerified(block, proposed, + trxs); + } else { + tarcap.second->getSpecificHandler()->onNewBlockVerified( + block, proposed, trxs); + } } } void Network::gossipVote(const std::shared_ptr &vote, const std::shared_ptr &block, bool rebroadcast) { for (const auto &tarcap : tarcaps_) { - tarcap.second->getSpecificHandler()->onNewPbftVote(vote, block, rebroadcast); + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + tarcap.second->getSpecificHandler()->onNewPbftVote(vote, block, rebroadcast); + } else { + tarcap.second->getSpecificHandler()->onNewPbftVote(vote, block, + rebroadcast); + } } } void Network::gossipVotesBundle(const std::vector> &votes, bool rebroadcast) { for (const auto &tarcap : tarcaps_) { - tarcap.second->getSpecificHandler()->onNewPbftVotesBundle(votes, - rebroadcast); + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + tarcap.second->getSpecificHandler()->onNewPbftVotesBundle(votes, + rebroadcast); + } else { + tarcap.second->getSpecificHandler()->onNewPbftVotesBundle( + votes, rebroadcast); + } } } void Network::gossipPillarBlockVote(const std::shared_ptr &vote, bool rebroadcast) { for (const auto &tarcap : tarcaps_) { - tarcap.second->getSpecificHandler()->onNewPillarVote(vote, rebroadcast); + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + tarcap.second->getSpecificHandler()->onNewPillarVote(vote, rebroadcast); + } else { + 
tarcap.second->getSpecificHandler()->onNewPillarVote(vote, + rebroadcast); + } } } @@ -317,7 +368,12 @@ void Network::handleMaliciousSyncPeer(const dev::p2p::NodeID &node_id) { continue; } - tarcap.second->getSpecificHandler()->handleMaliciousSyncPeer(node_id); + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + tarcap.second->getSpecificHandler()->handleMaliciousSyncPeer(node_id); + } else { + tarcap.second->getSpecificHandler()->handleMaliciousSyncPeer(node_id); + } } } @@ -325,8 +381,15 @@ std::shared_ptr Network::getMaxChainPeer() const { std::shared_ptr max_chain_peer{nullptr}; for (const auto &tarcap : tarcaps_) { - const auto peer = - tarcap.second->getSpecificHandler<::taraxa::network::tarcap::PbftSyncPacketHandler>()->getMaxChainPeer(); + std::shared_ptr peer; + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + peer = tarcap.second->getSpecificHandler<::taraxa::network::tarcap::PbftSyncPacketHandler>()->getMaxChainPeer(); + } else { + peer = + tarcap.second->getSpecificHandler<::taraxa::network::tarcap::v3::PbftSyncPacketHandler>()->getMaxChainPeer(); + } + if (!peer) { continue; } @@ -344,16 +407,28 @@ std::shared_ptr Network::getMaxChainPeer() const { void Network::requestPillarBlockVotesBundle(taraxa::PbftPeriod period, const taraxa::blk_hash_t &pillar_block_hash) { for (const auto &tarcap : tarcaps_) { // Try to get most up-to-date peer - const auto peer = - tarcap.second->getSpecificHandler<::taraxa::network::tarcap::PbftSyncPacketHandler>()->getMaxChainPeer(); + std::shared_ptr peer; + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + peer = tarcap.second->getSpecificHandler<::taraxa::network::tarcap::PbftSyncPacketHandler>()->getMaxChainPeer(); + } else { + peer = + tarcap.second->getSpecificHandler<::taraxa::network::tarcap::v3::PbftSyncPacketHandler>()->getMaxChainPeer(); + } if (!peer) { continue; } // TODO[2748]: is it good enough to request it just from 1 peer without knowing if he has 
all of the votes ? - tarcap.second->getSpecificHandler()->requestPillarVotesBundle( - period, pillar_block_hash, peer); + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + tarcap.second->getSpecificHandler()->requestPillarVotesBundle( + period, pillar_block_hash, peer); + } else { + tarcap.second->getSpecificHandler() + ->requestPillarVotesBundle(period, pillar_block_hash, peer); + } } } diff --git a/libraries/core_libs/network/src/tarcap/packets_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handler.cpp index 6d7ca8ff85..0fc450ccc7 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handler.cpp @@ -2,7 +2,7 @@ namespace taraxa::network::tarcap { -const std::shared_ptr& PacketsHandler::getSpecificHandler(SubprotocolPacketType packet_type) const { +const std::shared_ptr& PacketsHandler::getSpecificHandler(SubprotocolPacketType packet_type) const { auto selected_handler = packets_handlers_.find(packet_type); if (selected_handler == packets_handlers_.end()) { diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp index e856b8f209..0755cdb6c8 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp @@ -13,144 +13,59 @@ DagBlockPacketHandler::DagBlockPacketHandler(const FullNodeConfig &conf, std::sh std::shared_ptr pbft_chain, std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, std::shared_ptr trx_mgr, std::shared_ptr db, - bool trxs_in_dag_packet, const addr_t &node_addr, - const std::string &logs_prefix) + const addr_t &node_addr, const std::string &logs_prefix) : ExtSyncingPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_syncing_state), 
std::move(pbft_chain), std::move(pbft_mgr), std::move(dag_mgr), std::move(db), node_addr, logs_prefix + "DAG_BLOCK_PH"), - trx_mgr_(std::move(trx_mgr)), - kTrxsInDagPacket(trxs_in_dag_packet) {} + trx_mgr_(std::move(trx_mgr)) {} -void DagBlockPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - constexpr size_t required_size_v2 = 8; - constexpr size_t required_size = 2; - // Only one dag block can be received - if (kTrxsInDagPacket && packet_data.rlp_.itemCount() != required_size) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); - } else if (!kTrxsInDagPacket && packet_data.rlp_.itemCount() != required_size_v2) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size_v2); - } -} - -void DagBlockPacketHandler::process(const threadpool::PacketData &packet_data, - const std::shared_ptr &peer) { - std::unordered_map> transactions; - auto dag_rlp = packet_data.rlp_; - if (packet_data.rlp_.itemCount() == 2) { - const auto trx_count = packet_data.rlp_[0].itemCount(); - transactions.reserve(trx_count); +void DagBlockPacketHandler::process(DagBlockPacket &&packet, const std::shared_ptr &peer) { + blk_hash_t const hash = packet.dag_block->getHash(); - for (const auto tx_rlp : packet_data.rlp_[0]) { - try { - auto trx = std::make_shared(tx_rlp); - peer->markTransactionAsKnown(trx->getHash()); - transactions.emplace(trx->getHash(), std::move(trx)); - } catch (const Transaction::InvalidTransaction &e) { - throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); - } - } - dag_rlp = packet_data.rlp_[1]; + for (const auto &tx : packet.transactions) { + peer->markTransactionAsKnown(tx->getHash()); } - DagBlock block(dag_rlp); - blk_hash_t const hash = block.getHash(); - peer->markDagBlockAsKnown(hash); - if (block.getLevel() > peer->dag_level_) { - peer->dag_level_ = block.getLevel(); + if 
(packet.dag_block->getLevel() > peer->dag_level_) { + peer->dag_level_ = packet.dag_block->getLevel(); } // Do not process this block in case we already have it - if (dag_mgr_->isDagBlockKnown(block.getHash())) { + if (dag_mgr_->isDagBlockKnown(packet.dag_block->getHash())) { LOG(log_tr_) << "Received known DagBlockPacket " << hash << "from: " << peer->getId(); return; } - onNewBlockReceived(std::move(block), peer, transactions); -} - -void DagBlockPacketHandler::sendBlock(dev::p2p::NodeID const &peer_id, taraxa::DagBlock block, - const SharedTransactions &trxs) { - std::shared_ptr peer = peers_state_->getPeer(peer_id); - if (!peer) { - LOG(log_wr_) << "Send dag block " << block.getHash() << ". Failed to obtain peer " << peer_id; - return; + std::unordered_map> txs_map; + txs_map.reserve(packet.transactions.size()); + for (const auto &tx : packet.transactions) { + txs_map.emplace(tx->getHash(), tx); } - // This lock prevents race condition between syncing and gossiping dag blocks - std::unique_lock lock(peer->mutex_for_sending_dag_blocks_); - - // Transactions are first sent in transactions packet before sending the block - uint32_t index = 0; - while (index < trxs.size()) { - const uint32_t trx_count_to_send = std::min(static_cast(kMaxTransactionsInPacket), trxs.size() - index); - - dev::RLPStream s(TransactionPacketHandler::kTransactionPacketItemCount); - s.appendList(trx_count_to_send); - - taraxa::bytes trx_bytes; - for (uint32_t i = index; i < index + trx_count_to_send; i++) { - auto trx_data = trxs[i]->rlp(); - s << trxs[i]->getHash(); - trx_bytes.insert(trx_bytes.end(), std::begin(trx_data), std::end(trx_data)); - } - - s.appendList(trx_count_to_send); - s.appendRaw(trx_bytes, trx_count_to_send); - sealAndSend(peer_id, TransactionPacket, std::move(s)); - - index += trx_count_to_send; - } - - if (!sealAndSend(peer_id, DagBlockPacket, block.streamRLP(true))) { - LOG(log_wr_) << "Sending DagBlock " << block.getHash() << " failed to " << peer_id; - return; - } - 
- // Mark data as known if sending was successful - peer->markDagBlockAsKnown(block.getHash()); - for (const auto &trx : trxs) { - peer->markTransactionAsKnown(trx->getHash()); - } + onNewBlockReceived(std::move(packet.dag_block), peer, txs_map); } -void DagBlockPacketHandler::sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, taraxa::DagBlock block, - const SharedTransactions &trxs) { - std::shared_ptr peer = peers_state_->getPeer(peer_id); - if (!peer) { - LOG(log_wr_) << "Send dag block " << block.getHash() << ". Failed to obtain peer " << peer_id; - return; - } - - dev::RLPStream s(2); - +void DagBlockPacketHandler::sendBlockWithTransactions(const std::shared_ptr &peer, + const std::shared_ptr &block, + SharedTransactions &&trxs) { // This lock prevents race condition between syncing and gossiping dag blocks std::unique_lock lock(peer->mutex_for_sending_dag_blocks_); - taraxa::bytes trx_bytes; - for (uint32_t i = 0; i < trxs.size(); i++) { - auto trx_data = trxs[i]->rlp(); - trx_bytes.insert(trx_bytes.end(), std::begin(trx_data), std::end(trx_data)); - } - - s.appendList(trxs.size()); - s.appendRaw(trx_bytes, trxs.size()); - - s.appendRaw(block.rlp(true)); - - if (!sealAndSend(peer_id, DagBlockPacket, std::move(s))) { - LOG(log_wr_) << "Sending DagBlock " << block.getHash() << " failed to " << peer_id; + DagBlockPacket dag_block_packet{.transactions = std::move(trxs), .dag_block = block}; + if (!sealAndSend(peer->getId(), SubprotocolPacketType::kDagBlockPacket, encodePacketRlp(dag_block_packet))) { + LOG(log_wr_) << "Sending DagBlock " << block->getHash() << " failed to " << peer->getId(); return; } // Mark data as known if sending was successful - peer->markDagBlockAsKnown(block.getHash()); + peer->markDagBlockAsKnown(block->getHash()); } void DagBlockPacketHandler::onNewBlockReceived( - DagBlock &&block, const std::shared_ptr &peer, + std::shared_ptr &&block, const std::shared_ptr &peer, const std::unordered_map> &trxs) { - const auto block_hash = 
block.getHash(); + const auto block_hash = block->getHash(); auto verified = dag_mgr_->verifyBlock(block, trxs); switch (verified.first) { case DagManager::VerifyBlockReturnType::IncorrectTransactionsEstimation: @@ -213,7 +128,7 @@ void DagBlockPacketHandler::onNewBlockReceived( } break; case DagManager::VerifyBlockReturnType::Verified: { - auto status = dag_mgr_->addDagBlock(std::move(block), std::move(verified.second)); + auto status = dag_mgr_->addDagBlock(block, std::move(verified.second)); if (!status.first) { LOG(log_dg_) << "Received DagBlockPacket " << block_hash << "from: " << peer->getId(); // Ignore new block packets when pbft syncing @@ -225,9 +140,9 @@ void DagBlockPacketHandler::onNewBlockReceived( if (peer->peer_dag_synced_) { std::ostringstream err_msg; if (status.second.size() > 0) - err_msg << "DagBlock" << block.getHash() << " has missing pivot or/and tips " << status.second; + err_msg << "DagBlock" << block->getHash() << " has missing pivot or/and tips " << status.second; else - err_msg << "DagBlock" << block.getHash() << " could not be added to DAG"; + err_msg << "DagBlock" << block->getHash() << " could not be added to DAG"; throw MaliciousPeerException(err_msg.str()); } else { // peer_dag_synced_ flag ensures that this can only be performed once for a peer @@ -241,15 +156,16 @@ void DagBlockPacketHandler::onNewBlockReceived( } } -void DagBlockPacketHandler::onNewBlockVerified(const DagBlock &block, bool proposed, const SharedTransactions &trxs) { +void DagBlockPacketHandler::onNewBlockVerified(const std::shared_ptr &block, bool proposed, + const SharedTransactions &trxs) { // If node is pbft syncing and block is not proposed by us, this is an old block that has been verified - no block // gossip is needed if (!proposed && pbft_syncing_state_->isDeepPbftSyncing()) { return; } - const auto &block_hash = block.getHash(); - LOG(log_tr_) << "Verified NewBlock " << block_hash.toString(); + const auto &block_hash = block->getHash(); + LOG(log_tr_) 
<< "Verified dag block " << block_hash.toString(); std::vector peers_to_send; for (auto const &peer : peers_state_->getAllPeers()) { @@ -258,48 +174,39 @@ void DagBlockPacketHandler::onNewBlockVerified(const DagBlock &block, bool propo } } - std::string peer_and_transactions_to_log; // Sending it in same order favours some peers over others, always start with a different position const auto peers_to_send_count = peers_to_send.size(); - if (peers_to_send_count > 0) { - uint32_t start_with = rand() % peers_to_send_count; - for (uint32_t i = 0; i < peers_to_send_count; i++) { - auto peer_id = peers_to_send[(start_with + i) % peers_to_send_count]; - dev::RLPStream ts; - auto peer = peers_state_->getPeer(peer_id); - if (peer && !peer->syncing_) { - peer_and_transactions_to_log += " Peer: " + peer->getId().abridged() + " Trxs: "; + if (peers_to_send_count == 0) { + return; + } - SharedTransactions transactions_to_send; - for (const auto &trx : trxs) { - const auto &trx_hash = trx->getHash(); - if (peer->isTransactionKnown(trx_hash)) { - continue; - } - transactions_to_send.push_back(trx); - peer_and_transactions_to_log += trx_hash.abridged(); - } + std::string peer_and_transactions_to_log; + uint32_t start_with = rand() % peers_to_send_count; + for (uint32_t i = 0; i < peers_to_send_count; i++) { + auto peer_id = peers_to_send[(start_with + i) % peers_to_send_count]; + auto peer = peers_state_->getPeer(peer_id); + if (!peer || peer->syncing_) { + continue; + } - for (const auto &trx : trxs) { - assert(trx != nullptr); - const auto trx_hash = trx->getHash(); - if (peer->isTransactionKnown(trx_hash)) { - continue; - } + peer_and_transactions_to_log += " Peer: " + peer->getId().abridged() + " Trxs: "; - transactions_to_send.push_back(trx); - peer_and_transactions_to_log += trx_hash.abridged(); - } - if (kTrxsInDagPacket) { - sendBlockWithTransactions(peer_id, block, transactions_to_send); - } else { - sendBlock(peer_id, block, transactions_to_send); - } - 
peer->markDagBlockAsKnown(block_hash); + SharedTransactions transactions_to_send; + for (const auto &trx : trxs) { + assert(trx != nullptr); + const auto trx_hash = trx->getHash(); + if (peer->isTransactionKnown(trx_hash)) { + continue; } + + transactions_to_send.push_back(trx); + peer_and_transactions_to_log += trx_hash.abridged(); } + + sendBlockWithTransactions(peer, block, std::move(transactions_to_send)); } - LOG(log_dg_) << "Send DagBlock " << block.getHash() << " to peers: " << peer_and_transactions_to_log; - if (!peers_to_send.empty()) LOG(log_tr_) << "Sent block to " << peers_to_send.size() << " peers"; + + LOG(log_dg_) << "Send DagBlock " << block->getHash() << " to peers: " << peer_and_transactions_to_log; + LOG(log_tr_) << "Sent block to " << peers_to_send.size() << " peers"; } } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_sync_packet_handler.cpp index 9ceb0edf39..ca002eba3b 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_sync_packet_handler.cpp @@ -20,102 +20,77 @@ DagSyncPacketHandler::DagSyncPacketHandler(const FullNodeConfig& conf, std::shar logs_prefix + "DAG_SYNC_PH"), trx_mgr_(std::move(trx_mgr)) {} -void DagSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData& packet_data) const { - if (constexpr size_t required_size = 4; packet_data.rlp_.itemCount() != required_size) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); - } -} - -void DagSyncPacketHandler::process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) { - auto it = packet_data.rlp_.begin(); - const auto request_period = (*it++).toInt(); - const auto response_period = (*it++).toInt(); - +void 
DagSyncPacketHandler::process(DagSyncPacket&& packet, const std::shared_ptr& peer) { // If the periods did not match restart syncing - if (response_period > request_period) { - LOG(log_dg_) << "Received DagSyncPacket with mismatching periods: " << response_period << " " << request_period - << " from " << packet_data.from_node_id_.abridged(); - if (peer->pbft_chain_size_ < response_period) { - peer->pbft_chain_size_ = response_period; + if (packet.response_period > packet.request_period) { + LOG(log_dg_) << "Received DagSyncPacket with mismatching periods: " << packet.response_period << " " + << packet.request_period << " from " << peer->getId(); + if (peer->pbft_chain_size_ < packet.response_period) { + peer->pbft_chain_size_ = packet.response_period; } peer->peer_dag_syncing_ = false; // We might be behind, restart pbft sync if needed startSyncingPbft(); return; - } else if (response_period < request_period) { + } else if (packet.response_period < packet.request_period) { // This should not be possible for honest node std::ostringstream err_msg; - err_msg << "Received DagSyncPacket with mismatching periods: response_period(" << response_period - << ") != request_period(" << request_period << ")"; + err_msg << "Received DagSyncPacket with mismatching periods: response_period(" << packet.response_period + << ") != request_period(" << packet.request_period << ")"; throw MaliciousPeerException(err_msg.str()); } std::vector transactions_to_log; - std::unordered_map> transactions; - const auto trx_count = (*it).itemCount(); - transactions.reserve(trx_count); - transactions_to_log.reserve(trx_count); - - for (const auto tx_rlp : (*it++)) { - try { - auto trx = std::make_shared(tx_rlp); - peer->markTransactionAsKnown(trx->getHash()); - transactions.emplace(trx->getHash(), std::move(trx)); - } catch (const Transaction::InvalidTransaction& e) { - throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); - } - } - - std::vector dag_blocks; - 
std::vector dag_blocks_to_log; - dag_blocks.reserve((*it).itemCount()); - dag_blocks_to_log.reserve((*it).itemCount()); - - for (const auto block_rlp : *it) { - DagBlock block(block_rlp); - peer->markDagBlockAsKnown(block.getHash()); - if (dag_mgr_->isDagBlockKnown(block.getHash())) { - LOG(log_tr_) << "Received known DagBlock " << block.getHash() << "from: " << peer->getId(); - continue; - } - dag_blocks.emplace_back(std::move(block)); - } - - for (auto& trx : transactions) { - transactions_to_log.push_back(trx.first); - if (trx_mgr_->isTransactionKnown(trx.first)) { + std::unordered_map> transactions_map; + transactions_to_log.reserve(packet.transactions.size()); + transactions_map.reserve(packet.transactions.size()); + for (auto& trx : packet.transactions) { + const auto tx_hash = trx->getHash(); + peer->markTransactionAsKnown(tx_hash); + transactions_to_log.push_back(tx_hash); + transactions_map.emplace(tx_hash, trx); + + if (trx_mgr_->isTransactionKnown(tx_hash)) { continue; } - auto [verified, reason] = trx_mgr_->verifyTransaction(trx.second); + auto [verified, reason] = trx_mgr_->verifyTransaction(trx); if (!verified) { std::ostringstream err_msg; - err_msg << "DagBlock transaction " << trx.first << " validation failed: " << reason; + err_msg << "DagBlock transaction " << tx_hash << " validation failed: " << reason; throw MaliciousPeerException(err_msg.str()); } } - for (auto& block : dag_blocks) { - dag_blocks_to_log.push_back(block.getHash()); + std::vector dag_blocks_to_log; + dag_blocks_to_log.reserve(packet.dag_blocks.size()); + for (auto& block : packet.dag_blocks) { + dag_blocks_to_log.push_back(block->getHash()); + peer->markDagBlockAsKnown(block->getHash()); + + if (dag_mgr_->isDagBlockKnown(block->getHash())) { + LOG(log_tr_) << "Received known DagBlock " << block->getHash() << "from: " << peer->getId(); + continue; + } - auto verified = dag_mgr_->verifyBlock(block, transactions); + auto verified = dag_mgr_->verifyBlock(block, transactions_map); if 
(verified.first != DagManager::VerifyBlockReturnType::Verified) { std::ostringstream err_msg; - err_msg << "DagBlock " << block.getHash() << " failed verification with error code " + err_msg << "DagBlock " << block->getHash() << " failed verification with error code " << static_cast(verified.first); throw MaliciousPeerException(err_msg.str()); } - if (block.getLevel() > peer->dag_level_) peer->dag_level_ = block.getLevel(); + if (block->getLevel() > peer->dag_level_) peer->dag_level_ = block->getLevel(); - auto status = dag_mgr_->addDagBlock(std::move(block), std::move(verified.second)); + auto status = dag_mgr_->addDagBlock(block, std::move(verified.second)); if (!status.first) { std::ostringstream err_msg; if (status.second.size() > 0) - err_msg << "DagBlock" << block.getHash() << " has missing pivot or/and tips " << status.second; + err_msg << "DagBlock" << block->getHash() << " has missing pivot or/and tips " << status.second; else - err_msg << "DagBlock" << block.getHash() << " could not be added to DAG"; + err_msg << "DagBlock" << block->getHash() << " could not be added to DAG"; throw MaliciousPeerException(err_msg.str()); } } @@ -126,7 +101,7 @@ void DagSyncPacketHandler::process(const threadpool::PacketData& packet_data, co peer->peer_dag_syncing_ = false; LOG(log_dg_) << "Received DagSyncPacket with blocks: " << dag_blocks_to_log - << " Transactions: " << transactions_to_log << " from " << packet_data.from_node_id_; + << " Transactions: " << transactions_to_log << " from " << peer->getId(); } } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.cpp index a841fbac42..bf1a60e231 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.cpp +++ 
b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.cpp @@ -1,6 +1,7 @@ #include "network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp" #include "dag/dag_manager.hpp" +#include "network/tarcap/packets/latest/dag_sync_packet.hpp" #include "transaction/transaction_manager.hpp" namespace taraxa::network::tarcap { @@ -15,19 +16,13 @@ GetDagSyncPacketHandler::GetDagSyncPacketHandler(const FullNodeConfig &conf, std dag_mgr_(std::move(dag_mgr)), db_(std::move(db)) {} -void GetDagSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - if (constexpr size_t required_size = 2; packet_data.rlp_.itemCount() != required_size) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); - } -} - -void GetDagSyncPacketHandler::process(const threadpool::PacketData &packet_data, +void GetDagSyncPacketHandler::process(GetDagSyncPacket &&packet, [[maybe_unused]] const std::shared_ptr &peer) { if (!peer->requestDagSyncingAllowed()) { // This should not be possible for honest node // Each node should perform dag syncing only when allowed std::ostringstream err_msg; - err_msg << "Received multiple GetDagSyncPackets from " << packet_data.from_node_id_.abridged(); + err_msg << "Received multiple GetDagSyncPackets from " << peer->getId().abridged(); throw MaliciousPeerException(err_msg.str()); } @@ -35,21 +30,19 @@ void GetDagSyncPacketHandler::process(const threadpool::PacketData &packet_data, // This lock prevents race condition between syncing and gossiping dag blocks std::unique_lock lock(peer->mutex_for_sending_dag_blocks_); - std::unordered_set blocks_hashes; - auto it = packet_data.rlp_.begin(); - const auto peer_period = (*it++).toInt(); - + std::unordered_set blocks_hashes_set; std::string blocks_hashes_to_log; - for (const auto block_hash_rlp : *it) { - blk_hash_t hash = block_hash_rlp.toHash(); - blocks_hashes_to_log += 
hash.abridged(); - blocks_hashes.emplace(hash); + blocks_hashes_to_log.reserve(packet.blocks_hashes.size()); + for (const auto &hash : packet.blocks_hashes) { + if (blocks_hashes_set.insert(hash).second) { + blocks_hashes_to_log += hash.abridged(); + } } LOG(log_dg_) << "Received GetDagSyncPacket: " << blocks_hashes_to_log << " from " << peer->getId(); - auto [period, blocks, transactions] = dag_mgr_->getNonFinalizedBlocksWithTransactions(blocks_hashes); - if (peer_period == period) { + auto [period, blocks, transactions] = dag_mgr_->getNonFinalizedBlocksWithTransactions(blocks_hashes_set); + if (packet.peer_period == period) { peer->syncing_ = false; peer->peer_requested_dag_syncing_ = true; peer->peer_requested_dag_syncing_time_ = @@ -59,7 +52,7 @@ void GetDagSyncPacketHandler::process(const threadpool::PacketData &packet_data, blocks.clear(); transactions.clear(); } - sendBlocks(packet_data.from_node_id_, std::move(blocks), std::move(transactions), peer_period, period); + sendBlocks(peer->getId(), std::move(blocks), std::move(transactions), packet.peer_period, period); } void GetDagSyncPacketHandler::sendBlocks(const dev::p2p::NodeID &peer_id, @@ -69,21 +62,8 @@ void GetDagSyncPacketHandler::sendBlocks(const dev::p2p::NodeID &peer_id, auto peer = peers_state_->getPeer(peer_id); if (!peer) return; - dev::RLPStream s(4); - s.append(request_period); - s.append(period); - - s.appendList(transactions.size()); - for (const auto &tx : transactions) { - s.appendRaw(tx->rlp()); - } - - s.appendList(blocks.size()); - for (const auto &block : blocks) { - s.appendRaw(block->rlp(true)); - } - - sealAndSend(peer_id, SubprotocolPacketType::DagSyncPacket, std::move(s)); + DagSyncPacket dag_sync_packet(request_period, period, std::move(transactions), std::move(blocks)); + sealAndSend(peer_id, SubprotocolPacketType::kDagSyncPacket, encodePacketRlp(dag_sync_packet)); } } // namespace taraxa::network::tarcap diff --git 
a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.cpp index b7443b6e5d..5d2e55c4e9 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.cpp @@ -14,25 +14,16 @@ GetNextVotesBundlePacketHandler::GetNextVotesBundlePacketHandler( std::move(pbft_chain), std::move(vote_mgr), std::move(slashing_manager), node_addr, logs_prefix + "GET_NEXT_VOTES_BUNDLE_PH") {} -void GetNextVotesBundlePacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - if (constexpr size_t required_size = 2; packet_data.rlp_.itemCount() != required_size) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); - } -} - -void GetNextVotesBundlePacketHandler::process(const threadpool::PacketData &packet_data, +void GetNextVotesBundlePacketHandler::process(GetNextVotesBundlePacket &&packet, const std::shared_ptr &peer) { LOG(log_dg_) << "Received GetNextVotesSyncPacket request"; - - const PbftPeriod peer_pbft_period = packet_data.rlp_[0].toInt(); - const PbftRound peer_pbft_round = packet_data.rlp_[1].toInt(); const auto [pbft_round, pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); // Send votes only for current_period == peer_period && current_period >= peer_round - if (pbft_period != peer_pbft_period || pbft_round == 1 || pbft_round < peer_pbft_round) { + if (pbft_period != packet.peer_pbft_period || pbft_round == 1 || pbft_round < packet.peer_pbft_round) { LOG(log_nf_) << "No previous round next votes sync packet will be sent. 
pbft_period " << pbft_period - << ", peer_pbft_period " << peer_pbft_period << ", pbft_round " << pbft_round << ", peer_pbft_round " - << peer_pbft_round; + << ", peer_pbft_period " << packet.peer_pbft_period << ", pbft_round " << pbft_round + << ", peer_pbft_round " << packet.peer_pbft_round; return; } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp index e8af16d529..26c3d46c75 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp @@ -1,5 +1,6 @@ #include "network/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.hpp" +#include "network/tarcap/packets/latest/pbft_sync_packet.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "pbft/pbft_chain.hpp" #include "storage/storage.hpp" @@ -22,46 +23,38 @@ GetPbftSyncPacketHandler::GetPbftSyncPacketHandler(const FullNodeConfig &conf, s vote_mgr_(std::move(vote_mgr)), db_(std::move(db)) {} -void GetPbftSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - if (constexpr size_t required_size = 1; packet_data.rlp_.itemCount() != required_size) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); - } -} - -void GetPbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, - const std::shared_ptr &peer) { +void GetPbftSyncPacketHandler::process(GetPbftSyncPacket &&packet, const std::shared_ptr &peer) { LOG(log_tr_) << "Received GetPbftSyncPacket Block"; - const size_t height_to_sync = packet_data.rlp_[0].toInt(); // Here need PBFT chain size, not synced period since synced blocks has not verified yet. 
const size_t my_chain_size = pbft_chain_->getPbftChainSize(); - if (height_to_sync > my_chain_size) { + if (packet.height_to_sync > my_chain_size) { // Node update peers PBFT chain size in status packet. Should not request syncing period bigger than pbft chain size std::ostringstream err_msg; - err_msg << "Peer " << packet_data.from_node_id_ << " request syncing period start at " << height_to_sync + err_msg << "Peer " << peer->getId() << " request syncing period start at " << packet.height_to_sync << ". That's bigger than own PBFT chain size " << my_chain_size; throw MaliciousPeerException(err_msg.str()); } - if (kConf.is_light_node && height_to_sync + kConf.light_node_history <= my_chain_size) { + if (kConf.is_light_node && packet.height_to_sync + kConf.light_node_history <= my_chain_size) { std::ostringstream err_msg; - err_msg << "Peer " << packet_data.from_node_id_ << " request syncing period start at " << height_to_sync + err_msg << "Peer " << peer->getId() << " request syncing period start at " << packet.height_to_sync << ". 
Light node does not have the data " << my_chain_size; throw MaliciousPeerException(err_msg.str()); } size_t blocks_to_transfer = 0; auto pbft_chain_synced = false; - const auto total_period_data_size = my_chain_size - height_to_sync + 1; + const auto total_period_data_size = my_chain_size - packet.height_to_sync + 1; if (total_period_data_size <= kConf.network.sync_level_size) { blocks_to_transfer = total_period_data_size; pbft_chain_synced = true; } else { blocks_to_transfer = kConf.network.sync_level_size; } - LOG(log_tr_) << "Will send " << blocks_to_transfer << " PBFT blocks to " << packet_data.from_node_id_; + LOG(log_tr_) << "Will send " << blocks_to_transfer << " PBFT blocks to " << peer->getId(); - sendPbftBlocks(peer, height_to_sync, blocks_to_transfer, pbft_chain_synced); + sendPbftBlocks(peer, packet.height_to_sync, blocks_to_transfer, pbft_chain_synced); } // api for pbft syncing @@ -73,38 +66,32 @@ void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr for (auto block_period = from_period; block_period < from_period + blocks_to_transfer; block_period++) { bool last_block = (block_period == from_period + blocks_to_transfer - 1); - auto data = db_->getPeriodDataRaw(block_period); - - if (data.size() == 0) { + auto period_data = db_->getPeriodData(block_period); + if (!period_data.has_value()) { // This can happen when switching from light node to full node setting LOG(log_er_) << "DB corrupted. 
Cannot find period " << block_period << " PBFT block in db"; return; } - dev::RLPStream s; + std::shared_ptr pbft_sync_packet; + if (pbft_chain_synced && last_block) { // Latest finalized block cert votes are saved in db as reward votes for new blocks - const auto reward_votes = vote_mgr_->getRewardVotes(); + auto reward_votes = vote_mgr_->getRewardVotes(); assert(!reward_votes.empty()); // It is possible that the node pushed another block to the chain in the meantime if (reward_votes[0]->getPeriod() == block_period) { - s.appendList(3); - s << last_block; - s.appendRaw(data); - s.appendRaw(encodePbftVotesBundleRlp(reward_votes)); + pbft_sync_packet = std::make_shared(last_block, std::move(*period_data), + OptimizedPbftVotesBundle{std::move(reward_votes)}); } else { - s.appendList(2); - s << last_block; - s.appendRaw(data); + pbft_sync_packet = std::make_shared(last_block, std::move(*period_data)); } } else { - s.appendList(2); - s << last_block; - s.appendRaw(data); + pbft_sync_packet = std::make_shared(last_block, std::move(*period_data)); } LOG(log_dg_) << "Sending PbftSyncPacket period " << block_period << " to " << peer_id; - sealAndSend(peer_id, SubprotocolPacketType::PbftSyncPacket, std::move(s)); + sealAndSend(peer_id, SubprotocolPacketType::kPbftSyncPacket, encodePacketRlp(pbft_sync_packet)); if (pbft_chain_synced && last_block) { peer->syncing_ = false; } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp index cc71e189e9..9c36e9d304 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp @@ -1,5 +1,6 @@ #include "network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp" +#include 
"network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp" #include "network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp" namespace taraxa::network::tarcap { @@ -13,35 +14,27 @@ GetPillarVotesBundlePacketHandler::GetPillarVotesBundlePacketHandler( logs_prefix + "GET_PILLAR_VOTES_BUNDLE_PH"), pillar_chain_manager_(std::move(pillar_chain_manager)) {} -void GetPillarVotesBundlePacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - auto items = packet_data.rlp_.itemCount(); - if (items != kGetPillarVotesBundlePacketSize) { - throw InvalidRlpItemsCountException(packet_data.type_str_, items, kGetPillarVotesBundlePacketSize); - } -} - -void GetPillarVotesBundlePacketHandler::process(const threadpool::PacketData &packet_data, +void GetPillarVotesBundlePacketHandler::process(GetPillarVotesBundlePacket &&packet, const std::shared_ptr &peer) { LOG(log_dg_) << "GetPillarVotesBundlePacketHandler received from peer " << peer->getId(); - const PbftPeriod period = packet_data.rlp_[0].toInt(); - const blk_hash_t pillar_block_hash = packet_data.rlp_[1].toHash(); - if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(period)) { + if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(packet.period)) { std::ostringstream err_msg; - err_msg << "Pillar votes bundle request for period " << period << ", ficus hardfork block num " + err_msg << "Pillar votes bundle request for period " << packet.period << ", ficus hardfork block num " << kConf.genesis.state.hardforks.ficus_hf.block_num; throw MaliciousPeerException(err_msg.str()); } - if (!kConf.genesis.state.hardforks.ficus_hf.isPbftWithPillarBlockPeriod(period)) { + if (!kConf.genesis.state.hardforks.ficus_hf.isPbftWithPillarBlockPeriod(packet.period)) { std::ostringstream err_msg; - err_msg << "Pillar votes bundle request for period " << period << ". Wrong requested period"; + err_msg << "Pillar votes bundle request for period " << packet.period << ". 
Wrong requested period"; throw MaliciousPeerException(err_msg.str()); } - const auto votes = pillar_chain_manager_->getVerifiedPillarVotes(period, pillar_block_hash); + const auto votes = pillar_chain_manager_->getVerifiedPillarVotes(packet.period, packet.pillar_block_hash); if (votes.empty()) { - LOG(log_dg_) << "No pillar votes for period " << period << "and pillar block hash " << pillar_block_hash; + LOG(log_dg_) << "No pillar votes for period " << packet.period << "and pillar block hash " + << packet.pillar_block_hash; return; } // Check if the votes size exceeds the maximum limit and split into multiple packets if needed @@ -53,22 +46,24 @@ void GetPillarVotesBundlePacketHandler::process(const threadpool::PacketData &pa const size_t chunk_size = std::min(PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp, total_votes - votes_sent); - // Create a new RLPStream for the chunk - dev::RLPStream s(chunk_size); + // Create PillarVotesBundlePacket + std::vector> pillar_votes; + pillar_votes.reserve(chunk_size); for (size_t i = 0; i < chunk_size; ++i) { - const auto &sig = votes[votes_sent + i]; - s.appendRaw(sig->rlp()); + pillar_votes.emplace_back(votes[votes_sent + i]); } + PillarVotesBundlePacket pillar_votes_bundle_packet(OptimizedPillarVotesBundle{std::move(pillar_votes)}); // Seal and send the chunk to the peer - if (sealAndSend(peer->getId(), SubprotocolPacketType::PillarVotesBundlePacket, std::move(s))) { + if (sealAndSend(peer->getId(), SubprotocolPacketType::kPillarVotesBundlePacket, + encodePacketRlp(pillar_votes_bundle_packet))) { // Mark the votes in this chunk as known for (size_t i = 0; i < chunk_size; ++i) { peer->markPillarVoteAsKnown(votes[votes_sent + i]->getHash()); } - LOG(log_nf_) << "Pillar votes bundle for period " << period << ", hash " << pillar_block_hash << " sent to " - << peer->getId() << " (Chunk " + LOG(log_nf_) << "Pillar votes bundle for period " << packet.period << ", hash " << packet.pillar_block_hash + << " sent to " << 
peer->getId() << " (Chunk " << (votes_sent / PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp) + 1 << "/" << (total_votes + PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp - 1) / PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp @@ -82,11 +77,8 @@ void GetPillarVotesBundlePacketHandler::process(const threadpool::PacketData &pa void GetPillarVotesBundlePacketHandler::requestPillarVotesBundle(PbftPeriod period, const blk_hash_t &pillar_block_hash, const std::shared_ptr &peer) { - dev::RLPStream s(kGetPillarVotesBundlePacketSize); - s << period; - s << pillar_block_hash; - - if (sealAndSend(peer->getId(), SubprotocolPacketType::GetPillarVotesBundlePacket, std::move(s))) { + if (sealAndSend(peer->getId(), SubprotocolPacketType::kGetPillarVotesBundlePacket, + encodePacketRlp(GetPillarVotesBundlePacket(period, pillar_block_hash)))) { LOG(log_nf_) << "Requested pillar votes bundle for period " << period << " and pillar block " << pillar_block_hash << " from peer " << peer->getId(); } else { diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp index 561217b6e6..93ebe44028 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp @@ -22,66 +22,38 @@ PbftSyncPacketHandler::PbftSyncPacketHandler(const FullNodeConfig &conf, std::sh vote_mgr_(std::move(vote_mgr)), periodic_events_tp_(1, true) {} -void PbftSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - if (packet_data.rlp_.itemCount() != kStandardPacketSize && packet_data.rlp_.itemCount() != kChainSyncedPacketSize) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), kStandardPacketSize); - } - - // PeriodData rlp parsing 
cannot be done through util::rlp_tuple, which automatically checks the rlp size so it is - // checked here manually - if (packet_data.rlp_[1].itemCount() != PeriodData::kBaseRlpItemCount && - packet_data.rlp_[1].itemCount() != PeriodData::kExtendedRlpItemCount) { - throw InvalidRlpItemsCountException(packet_data.type_str_ + ":PeriodData", packet_data.rlp_[1].itemCount(), - PeriodData::kBaseRlpItemCount); - } -} - -void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, - const std::shared_ptr &peer) { +void PbftSyncPacketHandler::process(PbftSyncPacket &&packet, const std::shared_ptr &peer) { // Note: no need to consider possible race conditions due to concurrent processing as it is // disabled on priority_queue blocking dependencies level const auto syncing_peer = pbft_syncing_state_->syncingPeer(); if (!syncing_peer) { - LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << packet_data.from_node_id_.abridged() + LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << peer->getId().abridged() << " but there is no current syncing peer set"; return; } - if (syncing_peer->getId() != packet_data.from_node_id_) { - LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << packet_data.from_node_id_.abridged() + if (syncing_peer->getId() != peer->getId()) { + LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << peer->getId().abridged() << " current syncing peer " << syncing_peer->getId().abridged(); return; } // Process received pbft blocks // pbft_chain_synced is the flag to indicate own PBFT chain has synced with the peer's PBFT chain - const bool pbft_chain_synced = packet_data.rlp_.itemCount() == kChainSyncedPacketSize; - // last_block is the flag to indicate this is the last block in each syncing round, doesn't mean PBFT chain has synced - const bool last_block = packet_data.rlp_[0].toInt(); - PeriodData period_data; - try { - period_data = decodePeriodData(packet_data.rlp_[1]); - } catch (const 
std::runtime_error &e) { - throw MaliciousPeerException("Unable to parse PeriodData: " + std::string(e.what())); - } - - std::vector> current_block_cert_votes; - if (pbft_chain_synced) { - current_block_cert_votes = decodeVotesBundle(packet_data.rlp_[2]); - } - const auto pbft_blk_hash = period_data.pbft_blk->getBlockHash(); + const bool pbft_chain_synced = packet.current_block_cert_votes_bundle.has_value(); + const auto pbft_blk_hash = packet.period_data.pbft_blk->getBlockHash(); std::string received_dag_blocks_str; // This is just log related stuff - for (auto const &block : period_data.dag_blocks) { - received_dag_blocks_str += block.getHash().toString() + " "; - if (peer->dag_level_ < block.getLevel()) { - peer->dag_level_ = block.getLevel(); + for (auto const &block : packet.period_data.dag_blocks) { + received_dag_blocks_str += block->getHash().toString() + " "; + if (peer->dag_level_ < block->getLevel()) { + peer->dag_level_ = block->getLevel(); } } - const auto pbft_block_period = period_data.pbft_blk->getPeriod(); + const auto pbft_block_period = packet.period_data.pbft_blk->getPeriod(); LOG(log_dg_) << "PbftSyncPacket received. 
Period: " << pbft_block_period - << ", dag Blocks: " << received_dag_blocks_str << " from " << packet_data.from_node_id_; + << ", dag Blocks: " << received_dag_blocks_str << " from " << peer->getId(); peer->markPbftBlockAsKnown(pbft_blk_hash); // Update peer's pbft period if outdated @@ -92,8 +64,8 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, LOG(log_tr_) << "Processing pbft block: " << pbft_blk_hash; if (pbft_chain_->findPbftBlockInChain(pbft_blk_hash)) { - LOG(log_wr_) << "PBFT block " << pbft_blk_hash << ", period: " << period_data.pbft_blk->getPeriod() << " from " - << packet_data.from_node_id_ << " already present in chain"; + LOG(log_wr_) << "PBFT block " << pbft_blk_hash << ", period: " << packet.period_data.pbft_blk->getPeriod() + << " from " << peer->getId() << " already present in chain"; } else { if (pbft_block_period != pbft_mgr_->pbftSyncingPeriod() + 1) { // This can happen if we just got synced and block was cert voted @@ -109,11 +81,11 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, // Check cert vote matches if final synced block if (pbft_chain_synced) { - for (auto const &vote : current_block_cert_votes) { + for (auto const &vote : packet.current_block_cert_votes_bundle->votes) { if (vote->getBlockHash() != pbft_blk_hash) { LOG(log_er_) << "Invalid cert votes block hash " << vote->getBlockHash() << " instead of " << pbft_blk_hash - << " from peer " << packet_data.from_node_id_.abridged() << " received, stop syncing."; - handleMaliciousSyncPeer(packet_data.from_node_id_); + << " from peer " << peer->getId().abridged() << " received, stop syncing."; + handleMaliciousSyncPeer(peer->getId()); return; } } @@ -122,52 +94,50 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, // Check votes match the hash of previous block in the queue auto last_pbft_block_hash = pbft_mgr_->lastPbftBlockHashFromQueueOrChain(); // Check cert vote matches - for (auto const 
&vote : period_data.previous_block_cert_votes) { + for (auto const &vote : packet.period_data.previous_block_cert_votes) { if (vote->getBlockHash() != last_pbft_block_hash) { LOG(log_er_) << "Invalid cert votes block hash " << vote->getBlockHash() << " instead of " - << last_pbft_block_hash << " from peer " << packet_data.from_node_id_.abridged() - << " received, stop syncing."; - handleMaliciousSyncPeer(packet_data.from_node_id_); + << last_pbft_block_hash << " from peer " << peer->getId().abridged() << " received, stop syncing."; + handleMaliciousSyncPeer(peer->getId()); return; } } - if (!pbft_mgr_->validatePillarDataInPeriodData(period_data)) { - handleMaliciousSyncPeer(packet_data.from_node_id_); + if (!pbft_mgr_->validatePillarDataInPeriodData(packet.period_data)) { + handleMaliciousSyncPeer(peer->getId()); return; } - auto order_hash = PbftManager::calculateOrderHash(period_data.dag_blocks); - if (order_hash != period_data.pbft_blk->getOrderHash()) { + auto order_hash = PbftManager::calculateOrderHash(packet.period_data.dag_blocks); + if (order_hash != packet.period_data.pbft_blk->getOrderHash()) { { // This is just log related stuff std::vector trx_order; - trx_order.reserve(period_data.transactions.size()); + trx_order.reserve(packet.period_data.transactions.size()); std::vector blk_order; - blk_order.reserve(period_data.dag_blocks.size()); - for (auto t : period_data.transactions) { + blk_order.reserve(packet.period_data.dag_blocks.size()); + for (auto t : packet.period_data.transactions) { trx_order.push_back(t->getHash()); } - for (auto b : period_data.dag_blocks) { - blk_order.push_back(b.getHash()); + for (auto b : packet.period_data.dag_blocks) { + blk_order.push_back(b->getHash()); } LOG(log_er_) << "Order hash incorrect in period data " << pbft_blk_hash << " expected: " << order_hash - << " received " << period_data.pbft_blk->getOrderHash() << "; Dag order: " << blk_order - << "; Trx order: " << trx_order << "; from " << 
packet_data.from_node_id_.abridged() - << ", stop syncing."; + << " received " << packet.period_data.pbft_blk->getOrderHash() << "; Dag order: " << blk_order + << "; Trx order: " << trx_order << "; from " << peer->getId().abridged() << ", stop syncing."; } - handleMaliciousSyncPeer(packet_data.from_node_id_); + handleMaliciousSyncPeer(peer->getId()); return; } // This is special case when queue is empty and we can not say for sure that all votes that are part of this block // have been verified before if (pbft_mgr_->periodDataQueueEmpty()) { - for (const auto &v : period_data.previous_block_cert_votes) { + for (const auto &v : packet.period_data.previous_block_cert_votes) { if (auto vote_is_valid = vote_mgr_->validateVote(v); vote_is_valid.first == false) { - LOG(log_er_) << "Invalid reward votes in block " << period_data.pbft_blk->getBlockHash() << " from peer " - << packet_data.from_node_id_.abridged() + LOG(log_er_) << "Invalid reward votes in block " << packet.period_data.pbft_blk->getBlockHash() + << " from peer " << peer->getId().abridged() << " received, stop syncing. Validation failed. 
Err: " << vote_is_valid.second; - handleMaliciousSyncPeer(packet_data.from_node_id_); + handleMaliciousSyncPeer(peer->getId()); return; } @@ -175,8 +145,8 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, } // And now we need to replace it with verified votes - if (auto votes = vote_mgr_->checkRewardVotes(period_data.pbft_blk, true); votes.first) { - period_data.previous_block_cert_votes = std::move(votes.second); + if (auto votes = vote_mgr_->checkRewardVotes(packet.period_data.pbft_blk, true); votes.first) { + packet.period_data.previous_block_cert_votes = std::move(votes.second); } else { // checkRewardVotes could fail because we just cert voted this block and moved to next period, // in that case we are probably fully synced @@ -185,18 +155,21 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, return; } - LOG(log_er_) << "Invalid reward votes in block " << period_data.pbft_blk->getBlockHash() << " from peer " - << packet_data.from_node_id_.abridged() << " received, stop syncing."; - handleMaliciousSyncPeer(packet_data.from_node_id_); + LOG(log_er_) << "Invalid reward votes in block " << packet.period_data.pbft_blk->getBlockHash() << " from peer " + << peer->getId().abridged() << " received, stop syncing."; + handleMaliciousSyncPeer(peer->getId()); return; } } LOG(log_tr_) << "Synced PBFT block hash " << pbft_blk_hash << " with " - << period_data.previous_block_cert_votes.size() << " cert votes"; - LOG(log_tr_) << "Synced PBFT block " << period_data; - pbft_mgr_->periodDataQueuePush(std::move(period_data), packet_data.from_node_id_, - std::move(current_block_cert_votes)); + << packet.period_data.previous_block_cert_votes.size() << " cert votes"; + LOG(log_tr_) << "Synced PBFT block " << packet.period_data; + std::vector> current_block_cert_votes; + if (pbft_chain_synced) { + current_block_cert_votes = std::move(packet.current_block_cert_votes_bundle->votes); + } + 
pbft_mgr_->periodDataQueuePush(std::move(packet.period_data), peer->getId(), std::move(current_block_cert_votes)); } auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); @@ -209,7 +182,7 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, return; } - if (last_block) { + if (packet.last_block) { // If current sync period is actually bigger than the block we just received we are probably synced if (pbft_sync_period > pbft_block_period) { pbft_syncing_state_->setPbftSyncing(false); @@ -258,7 +231,7 @@ void PbftSyncPacketHandler::pbftSyncComplete() { } } -void PbftSyncPacketHandler::delayedPbftSync(int counter) { +void PbftSyncPacketHandler::delayedPbftSync(uint32_t counter) { const uint32_t max_delayed_pbft_sync_count = 60000 / kDelayedPbftSyncDelayMs; auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); if (counter > max_delayed_pbft_sync_count) { diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_vote_packet_handler.cpp index d0ef86ee62..a4ddfdd8e5 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_vote_packet_handler.cpp @@ -11,25 +11,16 @@ PillarVotePacketHandler::PillarVotePacketHandler(const FullNodeConfig &conf, std : ExtPillarVotePacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pillar_chain_manager), node_addr, logs_prefix + "PILLAR_VOTE_PH") {} -void PillarVotePacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - auto items = packet_data.rlp_.itemCount(); - if (items != PillarVote::kStandardRlpSize) { - throw InvalidRlpItemsCountException(packet_data.type_str_, items, PillarVote::kStandardRlpSize); - } -} - -void PillarVotePacketHandler::process(const threadpool::PacketData &packet_data, - const 
std::shared_ptr &peer) { - const auto pillar_vote = std::make_shared(packet_data.rlp_); - if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(pillar_vote->getPeriod())) { +void PillarVotePacketHandler::process(PillarVotePacket &&packet, const std::shared_ptr &peer) { + if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(packet.pillar_vote->getPeriod())) { std::ostringstream err_msg; - err_msg << "Pillar vote " << pillar_vote->getHash() << ", period " << pillar_vote->getPeriod() + err_msg << "Pillar vote " << packet.pillar_vote->getHash() << ", period " << packet.pillar_vote->getPeriod() << " < ficus hardfork block num"; throw MaliciousPeerException(err_msg.str()); } - if (processPillarVote(pillar_vote, peer)) { - onNewPillarVote(pillar_vote); + if (processPillarVote(packet.pillar_vote, peer)) { + onNewPillarVote(packet.pillar_vote); } } @@ -51,10 +42,7 @@ void PillarVotePacketHandler::onNewPillarVote(const std::shared_ptr void PillarVotePacketHandler::sendPillarVote(const std::shared_ptr &peer, const std::shared_ptr &vote) { - dev::RLPStream s; - s.appendRaw(vote->rlp()); - - if (sealAndSend(peer->getId(), SubprotocolPacketType::PillarVotePacket, std::move(s))) { + if (sealAndSend(peer->getId(), SubprotocolPacketType::kPillarVotePacket, encodePacketRlp(PillarVotePacket(vote)))) { peer->markPillarVoteAsKnown(vote->getHash()); LOG(log_dg_) << "Pillar vote " << vote->getHash() << ", period " << vote->getPeriod() << " sent to " << peer->getId(); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.cpp index b9473c59c8..cbbf4e3177 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.cpp @@ -12,20 +12,18 @@ 
PillarVotesBundlePacketHandler::PillarVotesBundlePacketHandler( : ExtPillarVotePacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pillar_chain_manager), node_addr, logs_prefix + "PILLAR_VOTES_BUNDLE_PH") {} -void PillarVotesBundlePacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - auto items = packet_data.rlp_.itemCount(); - if (items == 0 || items > kMaxPillarVotesInBundleRlp) { - throw InvalidRlpItemsCountException(packet_data.type_str_, items, kMaxPillarVotesInBundleRlp); +void PillarVotesBundlePacketHandler::process(PillarVotesBundlePacket &&packet, + const std::shared_ptr &peer) { + if (packet.pillar_votes_bundle.pillar_votes.size() == 0 || + packet.pillar_votes_bundle.pillar_votes.size() > kMaxPillarVotesInBundleRlp) { + throw InvalidRlpItemsCountException("PillarVotesBundlePacket", packet.pillar_votes_bundle.pillar_votes.size(), + kMaxPillarVotesInBundleRlp); } -} -void PillarVotesBundlePacketHandler::process(const threadpool::PacketData &packet_data, - const std::shared_ptr &peer) { // TODO[2744]: there could be the same protection as in pbft syncing that only requested bundle packet is accepted LOG(log_dg_) << "PillarVotesBundlePacket received from peer " << peer->getId(); - for (const auto vote_rlp : packet_data.rlp_) { - const auto pillar_vote = std::make_shared(vote_rlp); + for (const auto &pillar_vote : packet.pillar_votes_bundle.pillar_votes) { if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(pillar_vote->getPeriod())) { std::ostringstream err_msg; err_msg << "Synced pillar vote " << pillar_vote->getHash() << ", period " << pillar_vote->getPeriod() diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp index 855df89d43..4f318db6e6 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp +++ 
b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp @@ -2,6 +2,7 @@ #include "config/version.hpp" #include "dag/dag.hpp" +#include "network/tarcap/packets/latest/status_packet.hpp" #include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "pbft/pbft_chain.hpp" @@ -21,105 +22,83 @@ StatusPacketHandler::StatusPacketHandler(const FullNodeConfig& conf, std::shared logs_prefix + "STATUS_PH"), kGenesisHash(genesis_hash) {} -void StatusPacketHandler::validatePacketRlpFormat(const threadpool::PacketData& packet_data) const { - if (const auto items_count = packet_data.rlp_.itemCount(); - items_count != kInitialStatusPacketItemsCount && items_count != kStandardStatusPacketItemsCount) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), - kStandardStatusPacketItemsCount); - } -} - -void StatusPacketHandler::process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) { +void StatusPacketHandler::process(StatusPacket&& packet, const std::shared_ptr& peer) { // Important !!! 
Use only "selected_peer" and not "peer" in this function as "peer" might be nullptr auto selected_peer = peer; const auto pbft_synced_period = pbft_mgr_->pbftSyncingPeriod(); // Initial status packet - if (packet_data.rlp_.itemCount() == kInitialStatusPacketItemsCount) { + if (packet.initial_data.has_value()) { if (!selected_peer) { - selected_peer = peers_state_->getPendingPeer(packet_data.from_node_id_); + selected_peer = peers_state_->getPendingPeer(peer->getId()); if (!selected_peer) { - LOG(log_wr_) << "Peer " << packet_data.from_node_id_.abridged() + LOG(log_wr_) << "Peer " << peer->getId().abridged() << " missing in both peers and pending peers map - will be disconnected."; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + disconnect(peer->getId(), dev::p2p::UserReason); return; } } - auto it = packet_data.rlp_.begin(); - auto const peer_chain_id = (*it++).toInt(); - auto const peer_dag_level = (*it++).toInt(); - auto const genesis_hash = (*it++).toHash(); - auto const peer_pbft_chain_size = (*it++).toInt(); - auto const peer_syncing = (*it++).toInt(); - auto const peer_pbft_round = (*it++).toInt(); - auto const node_major_version = (*it++).toInt(); - auto const node_minor_version = (*it++).toInt(); - auto const node_patch_version = (*it++).toInt(); - auto const is_light_node = (*it++).toInt(); - auto const node_history = (*it++).toInt(); - - if (peer_chain_id != kConf.genesis.chain_id) { + if (packet.initial_data->peer_chain_id != kConf.genesis.chain_id) { LOG((peers_state_->getPeersCount()) ? log_nf_ : log_er_) - << "Incorrect network id " << peer_chain_id << ", host " << packet_data.from_node_id_.abridged() + << "Incorrect network id " << packet.initial_data->peer_chain_id << ", host " << peer->getId().abridged() << " will be disconnected"; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + disconnect(peer->getId(), dev::p2p::UserReason); return; } - if (genesis_hash != kGenesisHash) { - LOG((peers_state_->getPeersCount()) ? 
log_nf_ : log_er_) - << "Incorrect genesis hash " << genesis_hash << ", host " << packet_data.from_node_id_.abridged() + if (packet.initial_data->genesis_hash != kGenesisHash) { + LOG((peers_state_->getPeersCount()) ? log_nf_ : log_wr_) + << "Incorrect genesis hash " << packet.initial_data->genesis_hash << ", host " << peer->getId().abridged() << " will be disconnected"; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + disconnect(peer->getId(), dev::p2p::UserReason); return; } // If this is a light node and it cannot serve our sync request disconnect from it - if (is_light_node) { + if (packet.initial_data->is_light_node) { selected_peer->peer_light_node = true; - selected_peer->peer_light_node_history = node_history; - if (pbft_synced_period + node_history < peer_pbft_chain_size) { + selected_peer->peer_light_node_history = packet.initial_data->node_history; + if (pbft_synced_period + packet.initial_data->node_history < packet.peer_pbft_chain_size) { LOG((peers_state_->getPeersCount()) ? log_nf_ : log_er_) - << "Light node " << packet_data.from_node_id_.abridged() - << " would not be able to serve our syncing request. " - << "Current synced period " << pbft_synced_period << ", peer synced period " << pbft_synced_period - << ", peer light node history " << node_history << ". Peer will be disconnected"; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + << "Light node " << peer->getId().abridged() << " would not be able to serve our syncing request. " + << "Current synced period " << pbft_synced_period << ", peer synced period " << packet.peer_pbft_chain_size + << ", peer light node history " << packet.initial_data->node_history << ". 
Peer will be disconnected"; + disconnect(peer->getId(), dev::p2p::UserReason); return; } } - selected_peer->dag_level_ = peer_dag_level; - selected_peer->pbft_chain_size_ = peer_pbft_chain_size; - selected_peer->syncing_ = peer_syncing; - selected_peer->pbft_period_ = peer_pbft_chain_size + 1; - selected_peer->pbft_round_ = peer_pbft_round; + selected_peer->dag_level_ = packet.peer_dag_level; + selected_peer->pbft_chain_size_ = packet.peer_pbft_chain_size; + selected_peer->syncing_ = packet.peer_syncing; + selected_peer->pbft_period_ = packet.peer_pbft_chain_size + 1; + selected_peer->pbft_round_ = packet.peer_pbft_round; - peers_state_->setPeerAsReadyToSendMessages(packet_data.from_node_id_, selected_peer); + peers_state_->setPeerAsReadyToSendMessages(peer->getId(), selected_peer); - LOG(log_dg_) << "Received initial status message from " << packet_data.from_node_id_ << ", network id " - << peer_chain_id << ", peer DAG max level " << selected_peer->dag_level_ << ", genesis " - << genesis_hash << ", peer pbft chain size " << selected_peer->pbft_chain_size_ << ", peer syncing " - << std::boolalpha << selected_peer->syncing_ << ", peer pbft period " << selected_peer->pbft_period_ - << ", peer pbft round " << selected_peer->pbft_round_ << ", node major version" << node_major_version - << ", node minor version" << node_minor_version << ", node patch version" << node_patch_version; + LOG(log_dg_) << "Received initial status message from " << peer->getId() << ", network id " + << packet.initial_data->peer_chain_id << ", peer DAG max level " << selected_peer->dag_level_ + << ", genesis " << packet.initial_data->genesis_hash << ", peer pbft chain size " + << selected_peer->pbft_chain_size_ << ", peer syncing " << std::boolalpha << selected_peer->syncing_ + << ", peer pbft period " << selected_peer->pbft_period_ << ", peer pbft round " + << selected_peer->pbft_round_ << ", node major version" << packet.initial_data->node_major_version + << ", node minor version" << 
packet.initial_data->node_minor_version << ", node patch version" + << packet.initial_data->node_patch_version; } else { // Standard status packet - // TODO: initial and standard status packet could be separated... if (!selected_peer) { - LOG(log_er_) << "Received standard status packet from " << packet_data.from_node_id_.abridged() + LOG(log_er_) << "Received standard status packet from " << peer->getId().abridged() << ", without previously received initial status packet. Will be disconnected"; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + disconnect(peer->getId(), dev::p2p::UserReason); return; } - auto it = packet_data.rlp_.begin(); - selected_peer->dag_level_ = (*it++).toInt(); - selected_peer->pbft_chain_size_ = (*it++).toInt(); + selected_peer->dag_level_ = packet.peer_dag_level; + selected_peer->pbft_chain_size_ = packet.peer_pbft_chain_size; selected_peer->pbft_period_ = selected_peer->pbft_chain_size_ + 1; - selected_peer->syncing_ = (*it++).toInt(); - selected_peer->pbft_round_ = (*it++).toInt(); + selected_peer->syncing_ = packet.peer_syncing; + selected_peer->pbft_round_ = packet.peer_pbft_round; // TODO: Address malicious status if (!pbft_syncing_state_->isPbftSyncing()) { @@ -147,7 +126,7 @@ void StatusPacketHandler::process(const threadpool::PacketData& packet_data, con } selected_peer->last_status_pbft_chain_size_ = selected_peer->pbft_chain_size_.load(); - LOG(log_dg_) << "Received status message from " << packet_data.from_node_id_ << ", peer DAG max level " + LOG(log_dg_) << "Received status message from " << peer->getId() << ", peer DAG max level " << selected_peer->dag_level_ << ", peer pbft chain size " << selected_peer->pbft_chain_size_ << ", peer syncing " << std::boolalpha << selected_peer->syncing_ << ", peer pbft round " << selected_peer->pbft_round_; @@ -168,16 +147,15 @@ bool StatusPacketHandler::sendStatus(const dev::p2p::NodeID& node_id, bool initi if (initial) { success = sealAndSend( - node_id, StatusPacket, - 
std::move(dev::RLPStream(kInitialStatusPacketItemsCount) - << kConf.genesis.chain_id << dag_max_level << kGenesisHash << pbft_chain_size - << pbft_syncing_state_->isPbftSyncing() << pbft_round << TARAXA_MAJOR_VERSION << TARAXA_MINOR_VERSION - << TARAXA_PATCH_VERSION << dag_mgr_->isLightNode() << dag_mgr_->getLightNodeHistory())); + node_id, SubprotocolPacketType::kStatusPacket, + encodePacketRlp(StatusPacket( + pbft_chain_size, pbft_round, dag_max_level, pbft_syncing_state_->isPbftSyncing(), + StatusPacket::InitialData{kConf.genesis.chain_id, kGenesisHash, TARAXA_MAJOR_VERSION, TARAXA_MINOR_VERSION, + TARAXA_PATCH_VERSION, kConf.is_light_node, kConf.light_node_history}))); } else { - success = sealAndSend( - node_id, StatusPacket, - std::move(dev::RLPStream(kStandardStatusPacketItemsCount) - << dag_max_level << pbft_chain_size << pbft_syncing_state_->isDeepPbftSyncing() << pbft_round)); + success = sealAndSend(node_id, SubprotocolPacketType::kStatusPacket, + encodePacketRlp(StatusPacket(pbft_chain_size, pbft_round, dag_max_level, + pbft_syncing_state_->isDeepPbftSyncing()))); } return success; diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp index 63cb41449d..d4c45e2ec9 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp @@ -2,6 +2,7 @@ #include +#include "network/tarcap/packets/latest/transaction_packet.hpp" #include "transaction/transaction.hpp" #include "transaction/transaction_manager.hpp" @@ -10,76 +11,48 @@ namespace taraxa::network::tarcap { TransactionPacketHandler::TransactionPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr trx_mgr, const addr_t &node_addr, - bool hash_gossip, const 
std::string &logs_prefix) + const std::string &logs_prefix) : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, logs_prefix + "TRANSACTION_PH"), - trx_mgr_(std::move(trx_mgr)), - kHashGossip(hash_gossip) {} + trx_mgr_(std::move(trx_mgr)) {} -void TransactionPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - auto items = packet_data.rlp_.itemCount(); - if (items != kTransactionPacketItemCount) { - throw InvalidRlpItemsCountException(packet_data.type_str_, items, kTransactionPacketItemCount); +inline void TransactionPacketHandler::process(TransactionPacket &&packet, const std::shared_ptr &peer) { + if (packet.transactions.size() > kMaxTransactionsInPacket) { + throw InvalidRlpItemsCountException("TransactionPacket:transactions", packet.transactions.size(), + kMaxTransactionsInPacket); } - auto hashes_count = packet_data.rlp_[0].itemCount(); - auto trx_count = packet_data.rlp_[1].itemCount(); - if (hashes_count < trx_count) { - throw InvalidRlpItemsCountException(packet_data.type_str_, hashes_count, trx_count); + if (packet.extra_transactions_hashes.size() > kMaxHashesInPacket) { + throw InvalidRlpItemsCountException("TransactionPacket:hashes", packet.extra_transactions_hashes.size(), + kMaxHashesInPacket); } - if (hashes_count == 0 || hashes_count > kMaxTransactionsInPacket + kMaxHashesInPacket) { - throw InvalidRlpItemsCountException(packet_data.type_str_, hashes_count, - kMaxTransactionsInPacket + kMaxHashesInPacket); - } - - if (trx_count > kMaxTransactionsInPacket) { - throw InvalidRlpItemsCountException(packet_data.type_str_, trx_count, kMaxTransactionsInPacket); - } -} - -inline void TransactionPacketHandler::process(const threadpool::PacketData &packet_data, - const std::shared_ptr &peer) { - std::vector received_transactions; - const auto transaction_hashes_count = packet_data.rlp_[0].itemCount(); - const auto transaction_count = packet_data.rlp_[1].itemCount(); - 
received_transactions.reserve(transaction_count); - - std::vector trx_hashes; - trx_hashes.reserve(transaction_hashes_count); - - // First extract only transaction hashes - for (const auto trx_hash_rlp : packet_data.rlp_[0]) { - auto trx_hash = trx_hash_rlp.toHash(); - peer->markTransactionAsKnown(trx_hash); - trx_hashes.emplace_back(std::move(trx_hash)); + // Extra hashes are hashes of transactions that were not sent as full transactions due to max limit, just mark them as + // known for sender + for (const auto &extra_tx_hash : packet.extra_transactions_hashes) { + peer->markTransactionAsKnown(extra_tx_hash); } - for (size_t tx_idx = 0; tx_idx < transaction_count; tx_idx++) { - const auto &trx_hash = trx_hashes[tx_idx]; + size_t unseen_txs_count = 0; + for (auto &transaction : packet.transactions) { + const auto tx_hash = transaction->getHash(); + peer->markTransactionAsKnown(tx_hash); // Skip any transactions that are already known to the trx mgr - if (trx_mgr_->isTransactionKnown(trx_hash)) { + if (trx_mgr_->isTransactionKnown(tx_hash)) { continue; } - std::shared_ptr transaction; - // Deserialization is expensive, do it only for the transactions we are about to process - try { - transaction = std::make_shared(packet_data.rlp_[1][tx_idx].data().toBytes()); - received_transactions.emplace_back(trx_hash); - } catch (const Transaction::InvalidTransaction &e) { - throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); - } + unseen_txs_count++; const auto [verified, reason] = trx_mgr_->verifyTransaction(transaction); if (!verified) { std::ostringstream err_msg; - err_msg << "DagBlock transaction " << transaction->getHash() << " validation failed: " << reason; + err_msg << "DagBlock transaction " << tx_hash << " validation failed: " << reason; throw MaliciousPeerException(err_msg.str()); } received_trx_count_++; - const auto tx_hash = transaction->getHash(); + const auto status = 
trx_mgr_->insertValidatedTransaction(std::move(transaction)); if (status == TransactionStatus::Inserted) { unique_received_trx_count_++; @@ -93,50 +66,10 @@ inline void TransactionPacketHandler::process(const threadpool::PacketData &pack } } - if (transaction_count > 0) { - LOG(log_tr_) << "Received TransactionPacket with " << packet_data.rlp_.itemCount() << " transactions"; - LOG(log_dg_) << "Received TransactionPacket with " << received_transactions.size() - << " unseen transactions:" << received_transactions << " from: " << peer->getId().abridged(); - } -} - -void TransactionPacketHandler::periodicSendTransactionsWithoutHashGossip( - std::vector &&transactions) { - std::vector>>> - peers_with_transactions_to_send; - - auto peers = peers_state_->getAllPeers(); - for (const auto &peer : peers) { - // Confirm that status messages were exchanged otherwise message might be ignored and node would - // incorrectly markTransactionAsKnown - if (!peer.second->syncing_) { - SharedTransactions peer_trxs; - for (auto const &account_trx : transactions) { - for (auto const &trx : account_trx) { - auto trx_hash = trx->getHash(); - if (peer.second->isTransactionKnown(trx_hash)) { - continue; - } - peer_trxs.push_back(trx); - if (peer_trxs.size() == kMaxTransactionsInPacket) { - peers_with_transactions_to_send.push_back({peer.first, {peer_trxs, {}}}); - peer_trxs.clear(); - }; - } - } - if (peer_trxs.size() > 0) { - peers_with_transactions_to_send.push_back({peer.first, {peer_trxs, {}}}); - } - } - } - const auto peers_to_send_count = peers_with_transactions_to_send.size(); - if (peers_to_send_count > 0) { - // Sending it in same order favours some peers over others, always start with a different position - uint32_t start_with = rand() % peers_to_send_count; - for (uint32_t i = 0; i < peers_to_send_count; i++) { - auto peer_to_send = peers_with_transactions_to_send[(start_with + i) % peers_to_send_count]; - sendTransactions(peers[peer_to_send.first], 
std::move(peer_to_send.second)); - } + if (!packet.transactions.empty()) { + LOG(log_tr_) << "Received TransactionPacket with " << packet.transactions.size() << " transactions"; + LOG(log_dg_) << "Received TransactionPacket with " << packet.transactions.size() + << " unseen transactions:" << unseen_txs_count << " from: " << peer->getId().abridged(); } } @@ -216,12 +149,6 @@ TransactionPacketHandler::transactionsToSendToPeers(std::vector &&transactions) { - // Support of old v2 net version. Remove once network is fully updated - if (!kHashGossip) { - periodicSendTransactionsWithoutHashGossip(std::move(transactions)); - return; - } - auto peers_with_transactions_to_send = transactionsToSendToPeers(std::move(transactions)); const auto peers_to_send_count = peers_with_transactions_to_send.size(); if (peers_to_send_count > 0) { @@ -238,31 +165,16 @@ void TransactionPacketHandler::sendTransactions(std::shared_ptr peer std::pair> &&transactions) { if (!peer) return; const auto peer_id = peer->getId(); - const auto transactions_size = transactions.first.size(); - const auto hashes_size = transactions.second.size(); LOG(log_tr_) << "sendTransactions " << transactions.first.size() << " to " << peer_id; + TransactionPacket packet{.transactions = std::move(transactions.first), + .extra_transactions_hashes = std::move(transactions.second)}; - dev::RLPStream s(kTransactionPacketItemCount); - s.appendList(transactions_size + hashes_size); - for (const auto &trx : transactions.first) { - s << trx->getHash(); - } - - for (const auto &trx_hash : transactions.second) { - s << trx_hash; - } - - s.appendList(transactions_size); - - for (const auto &trx : transactions.first) { - s.appendRaw(trx->rlp()); - } - - if (sealAndSend(peer_id, TransactionPacket, std::move(s))) { - for (const auto &trx : transactions.first) { + if (sealAndSend(peer_id, SubprotocolPacketType::kTransactionPacket, encodePacketRlp(packet))) { + for (const auto &trx : packet.transactions) { 
peer->markTransactionAsKnown(trx->getHash()); } + // Note: do not mark packet.extra_transactions_hashes as known for peer - we are sending just hashes, not full txs } } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp index 67a284c4fd..780d3a95a5 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp @@ -1,5 +1,6 @@ #include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" +#include "network/tarcap/packets/latest/vote_packet.hpp" #include "pbft/pbft_manager.hpp" #include "vote_manager/vote_manager.hpp" @@ -15,44 +16,26 @@ VotePacketHandler::VotePacketHandler(const FullNodeConfig &conf, std::shared_ptr std::move(pbft_chain), std::move(vote_mgr), std::move(slashing_manager), node_addr, logs_prefix + "PBFT_VOTE_PH") {} -void VotePacketHandler::validatePacketRlpFormat([[maybe_unused]] const threadpool::PacketData &packet_data) const { - auto items = packet_data.rlp_.itemCount(); - // Vote packet can contain either just a vote or vote + block + peer_chain_size - if (items != kVotePacketSize && items != kExtendedVotePacketSize) { - throw InvalidRlpItemsCountException(packet_data.type_str_, items, kExtendedVotePacketSize); - } -} - -void VotePacketHandler::process(const threadpool::PacketData &packet_data, const std::shared_ptr &peer) { +void VotePacketHandler::process(VotePacket &&packet, const std::shared_ptr &peer) { const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); - // Optional packet items - std::shared_ptr pbft_block{nullptr}; - std::optional peer_chain_size{}; + if (packet.optional_data.has_value()) { + LOG(log_dg_) << "Received PBFT vote " << packet.vote->getHash() << " with PBFT block " + << 
packet.optional_data->pbft_block->getBlockHash(); - std::shared_ptr vote = std::make_shared(packet_data.rlp_[0]); - if (const size_t item_count = packet_data.rlp_.itemCount(); item_count == kExtendedVotePacketSize) { - try { - pbft_block = std::make_shared(packet_data.rlp_[1]); - } catch (const std::exception &e) { - throw MaliciousPeerException(e.what()); + // Update peer's max chain size + if (packet.optional_data->peer_chain_size > peer->pbft_chain_size_) { + peer->pbft_chain_size_ = packet.optional_data->peer_chain_size; } - peer_chain_size = packet_data.rlp_[2].toInt(); - LOG(log_dg_) << "Received PBFT vote " << vote->getHash() << " with PBFT block " << pbft_block->getBlockHash(); } else { - LOG(log_dg_) << "Received PBFT vote " << vote->getHash(); - } - - // Update peer's max chain size - if (peer_chain_size.has_value() && *peer_chain_size > peer->pbft_chain_size_) { - peer->pbft_chain_size_ = *peer_chain_size; + LOG(log_dg_) << "Received PBFT vote " << packet.vote->getHash(); } - const auto vote_hash = vote->getHash(); + const auto vote_hash = packet.vote->getHash(); - if (!isPbftRelevantVote(vote)) { + if (!isPbftRelevantVote(packet.vote)) { LOG(log_dg_) << "Drop irrelevant vote " << vote_hash << " for current pbft state. Vote (period, round, step) = (" - << vote->getPeriod() << ", " << vote->getRound() << ", " << vote->getStep() + << packet.vote->getPeriod() << ", " << packet.vote->getRound() << ", " << packet.vote->getStep() << "). 
Current PBFT (period, round, step) = (" << current_pbft_period << ", " << current_pbft_round << ", " << pbft_mgr_->getPbftStep() << ")"; return; @@ -64,24 +47,28 @@ void VotePacketHandler::process(const threadpool::PacketData &packet_data, const return; } - if (pbft_block) { - if (pbft_block->getBlockHash() != vote->getBlockHash()) { + std::shared_ptr pbft_block; + if (packet.optional_data.has_value()) { + if (packet.optional_data->pbft_block->getBlockHash() != packet.vote->getBlockHash()) { std::ostringstream err_msg; - err_msg << "Vote " << vote->getHash().abridged() << " voted block " << vote->getBlockHash().abridged() - << " != actual block " << pbft_block->getBlockHash().abridged(); + err_msg << "Vote " << packet.vote->getHash().abridged() << " voted block " + << packet.vote->getBlockHash().abridged() << " != actual block " + << packet.optional_data->pbft_block->getBlockHash().abridged(); throw MaliciousPeerException(err_msg.str()); } - peer->markPbftBlockAsKnown(pbft_block->getBlockHash()); + peer->markPbftBlockAsKnown(packet.optional_data->pbft_block->getBlockHash()); + pbft_block = packet.optional_data->pbft_block; } - if (!processVote(vote, pbft_block, peer, true)) { + if (!processVote(packet.vote, pbft_block, peer, true)) { return; } // Do not mark it before, as peers have small caches of known votes. 
Only mark gossiping votes peer->markPbftVoteAsKnown(vote_hash); - onNewPbftVote(vote, pbft_block); + + pbft_mgr_->gossipVote(packet.vote, pbft_block); } void VotePacketHandler::onNewPbftVote(const std::shared_ptr &vote, const std::shared_ptr &block, @@ -113,19 +100,13 @@ void VotePacketHandler::sendPbftVote(const std::shared_ptr &peer, co return; } - dev::RLPStream s; - + std::optional optional_packet_data; if (block) { - s = dev::RLPStream(kExtendedVotePacketSize); - s.appendRaw(vote->rlp(true, false)); - s.appendRaw(block->rlp(true)); - s.append(pbft_chain_->getPbftChainSize()); - } else { - s = dev::RLPStream(kVotePacketSize); - s.appendRaw(vote->rlp(true, false)); + optional_packet_data = VotePacket::OptionalData{block, pbft_chain_->getPbftChainSize()}; } - if (sealAndSend(peer->getId(), SubprotocolPacketType::VotePacket, std::move(s))) { + if (sealAndSend(peer->getId(), SubprotocolPacketType::kVotePacket, + encodePacketRlp(VotePacket(vote, std::move(optional_packet_data))))) { peer->markPbftVoteAsKnown(vote->getHash()); if (block) { peer->markPbftBlockAsKnown(block->getBlockHash()); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp index 68f653fbb8..9d5de0f714 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp @@ -17,48 +17,31 @@ VotesBundlePacketHandler::VotesBundlePacketHandler(const FullNodeConfig &conf, s std::move(pbft_chain), std::move(vote_mgr), std::move(slashing_manager), node_addr, logs_prefix + "VOTES_BUNDLE_PH") {} -void VotesBundlePacketHandler::validatePacketRlpFormat( - [[maybe_unused]] const threadpool::PacketData &packet_data) const { - auto items = packet_data.rlp_.itemCount(); - if (items != kPbftVotesBundleRlpSize) { - throw 
InvalidRlpItemsCountException(packet_data.type_str_, items, kPbftVotesBundleRlpSize); +void VotesBundlePacketHandler::process(VotesBundlePacket &&packet, const std::shared_ptr &peer) { + if (packet.votes_bundle.votes.size() == 0 || packet.votes_bundle.votes.size() > kMaxVotesInBundleRlp) { + throw InvalidRlpItemsCountException("VotesBundlePacket", packet.votes_bundle.votes.size(), kMaxVotesInBundleRlp); } - auto votes_count = packet_data.rlp_[kPbftVotesBundleRlpSize - 1].itemCount(); - if (votes_count == 0 || votes_count > kMaxVotesInBundleRlp) { - throw InvalidRlpItemsCountException(packet_data.type_str_, items, kMaxVotesInBundleRlp); - } -} - -void VotesBundlePacketHandler::process(const threadpool::PacketData &packet_data, - const std::shared_ptr &peer) { const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); - const auto votes_bundle_block_hash = packet_data.rlp_[0].toHash(); - const auto votes_bundle_pbft_period = packet_data.rlp_[1].toInt(); - const auto votes_bundle_pbft_round = packet_data.rlp_[2].toInt(); - const auto votes_bundle_votes_step = packet_data.rlp_[3].toInt(); - - const auto &reference_vote = - std::make_shared(votes_bundle_block_hash, votes_bundle_pbft_period, votes_bundle_pbft_round, - votes_bundle_votes_step, packet_data.rlp_[4][0]); + const auto &reference_vote = packet.votes_bundle.votes.front(); const auto votes_bundle_votes_type = reference_vote->getType(); // Votes sync bundles are allowed to cotain only votes bundles of the same type, period, round and step so if first // vote is irrelevant, all of them are - if (!isPbftRelevantVote(reference_vote)) { + if (!isPbftRelevantVote(packet.votes_bundle.votes[0])) { LOG(log_wr_) << "Drop votes sync bundle as it is irrelevant for current pbft state. Votes (period, round, step) = (" - << votes_bundle_pbft_period << ", " << votes_bundle_pbft_round << ", " << reference_vote->getStep() - << "). 
Current PBFT (period, round, step) = (" << current_pbft_period << ", " << current_pbft_round - << ", " << pbft_mgr_->getPbftStep() << ")"; + << reference_vote->getPeriod() << ", " << reference_vote->getRound() << ", " + << reference_vote->getStep() << "). Current PBFT (period, round, step) = (" << current_pbft_period + << ", " << current_pbft_round << ", " << pbft_mgr_->getPbftStep() << ")"; return; } // VotesBundlePacket does not support propose votes if (reference_vote->getType() == PbftVoteTypes::propose_vote) { - LOG(log_er_) << "Dropping votes bundle packet due to received \"propose\" votes from " << packet_data.from_node_id_ + LOG(log_er_) << "Dropping votes bundle packet due to received \"propose\" votes from " << peer->getId() << ". The peer may be a malicious player, will be disconnected"; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + disconnect(peer->getId(), dev::p2p::UserReason); return; } @@ -69,10 +52,8 @@ void VotesBundlePacketHandler::process(const threadpool::PacketData &packet_data check_max_round_step = false; } - std::vector> votes; - for (const auto vote_rlp : packet_data.rlp_[4]) { - auto vote = std::make_shared(votes_bundle_block_hash, votes_bundle_pbft_period, votes_bundle_pbft_round, - votes_bundle_votes_step, vote_rlp); + size_t processed_votes_count = 0; + for (const auto &vote : packet.votes_bundle.votes) { peer->markPbftVoteAsKnown(vote->getHash()); // Do not process vote that has already been validated @@ -87,14 +68,14 @@ void VotesBundlePacketHandler::process(const threadpool::PacketData &packet_data continue; } - votes.push_back(std::move(vote)); + processed_votes_count++; } - LOG(log_nf_) << "Received " << packet_data.rlp_[4].itemCount() << " (processed " << votes.size() - << " ) sync votes from peer " << packet_data.from_node_id_ << " node current round " - << current_pbft_round << ", peer pbft round " << votes_bundle_pbft_round; + LOG(log_nf_) << "Received " << packet.votes_bundle.votes.size() << " (processed " 
<< processed_votes_count + << " ) sync votes from peer " << peer->getId() << " node current round " << current_pbft_round + << ", peer pbft round " << reference_vote->getRound(); - onNewPbftVotesBundle(votes, false, packet_data.from_node_id_); + onNewPbftVotesBundle(packet.votes_bundle.votes, false, peer->getId()); } void VotesBundlePacketHandler::onNewPbftVotesBundle(const std::vector> &votes, diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_bls_sig_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/common/ext_bls_sig_packet_handler.cpp similarity index 89% rename from libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_bls_sig_packet_handler.cpp rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/common/ext_bls_sig_packet_handler.cpp index 90b3ab940e..e6ce8b8677 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_bls_sig_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/common/ext_bls_sig_packet_handler.cpp @@ -1,7 +1,7 @@ -#include "network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/common/ext_pillar_vote_packet_handler.hpp" #include "pillar_chain/pillar_chain_manager.hpp" -namespace taraxa::network::tarcap { +namespace taraxa::network::tarcap::v3 { ExtPillarVotePacketHandler::ExtPillarVotePacketHandler( const FullNodeConfig &conf, std::shared_ptr peers_state, @@ -34,4 +34,4 @@ bool ExtPillarVotePacketHandler::processPillarVote(const std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -72,7 +72,7 @@ bool ExtSyncingPacketHandler::syncPeerPbft(PbftPeriod request_period) { } LOG(log_nf_) << "Send GetPbftSyncPacket with period " << request_period << " to node " << syncing_peer->getId(); - return sealAndSend(syncing_peer->getId(), SubprotocolPacketType::GetPbftSyncPacket, + return 
sealAndSend(syncing_peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, std::move(dev::RLPStream(1) << request_period)); } @@ -161,7 +161,7 @@ void ExtSyncingPacketHandler::requestDagBlocks(const dev::p2p::NodeID &_nodeID, s.append(period); s.append(blocks); - sealAndSend(_nodeID, SubprotocolPacketType::GetDagSyncPacket, std::move(s)); + sealAndSend(_nodeID, SubprotocolPacketType::kGetDagSyncPacket, std::move(s)); } -} // namespace taraxa::network::tarcap +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/common/ext_votes_packet_handler.cpp similarity index 95% rename from libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.cpp rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/common/ext_votes_packet_handler.cpp index 21431f602c..774342ffa7 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/common/ext_votes_packet_handler.cpp @@ -1,11 +1,12 @@ -#include "network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/common/ext_votes_packet_handler.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "pbft/pbft_manager.hpp" #include "vote/pbft_vote.hpp" #include "vote/votes_bundle_rlp.hpp" #include "vote_manager/vote_manager.hpp" -namespace taraxa::network::tarcap { +namespace taraxa::network::tarcap::v3 { ExtVotesPacketHandler::ExtVotesPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -93,7 +94,7 @@ std::pair ExtVotesPacketHandler::validateVotePeriodRoundStep( if (vote->getVoter() == peer->getId() && std::chrono::system_clock::now() - 
last_pbft_block_sync_request_time_ > kSyncRequestInterval) { // request PBFT chain sync from this node - sealAndSend(peer->getId(), SubprotocolPacketType::GetPbftSyncPacket, + sealAndSend(peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, std::move(dev::RLPStream(1) << std::max(vote->getPeriod() - 1, peer->pbft_chain_size_.load()))); last_pbft_block_sync_request_time_ = std::chrono::system_clock::now(); } @@ -191,7 +192,7 @@ void ExtVotesPacketHandler::sendPbftVotesBundle(const std::shared_ptrgetId(), SubprotocolPacketType::VotesBundlePacket, std::move(votes_rlp_stream))) { + if (sealAndSend(peer->getId(), SubprotocolPacketType::kVotesBundlePacket, std::move(votes_rlp_stream))) { LOG(log_dg_) << " Votes bundle with " << votes.size() << " votes sent to " << peer->getId(); for (const auto &vote : votes) { peer->markPbftVoteAsKnown(vote->getHash()); @@ -221,4 +222,4 @@ void ExtVotesPacketHandler::sendPbftVotesBundle(const std::shared_ptr peers_state, std::shared_ptr packets_stats, const addr_t& node_addr, @@ -136,7 +137,8 @@ void PacketHandler::disconnect(const dev::p2p::NodeID& node_id, dev::p2p::Discon void PacketHandler::requestPbftNextVotesAtPeriodRound(const dev::p2p::NodeID& peerID, PbftPeriod pbft_period, PbftRound pbft_round) { LOG(log_dg_) << "Sending GetNextVotesSyncPacket with period:" << pbft_period << ", round:" << pbft_round; - sealAndSend(peerID, GetNextVotesSyncPacket, std::move(dev::RLPStream(2) << pbft_period << pbft_round)); + sealAndSend(peerID, SubprotocolPacketType::kGetNextVotesSyncPacket, + std::move(dev::RLPStream(2) << pbft_period << pbft_round)); } -} // namespace taraxa::network::tarcap +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/dag_block_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/dag_block_packet_handler.cpp new file mode 100644 index 0000000000..5a707de443 --- /dev/null +++ 
b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/dag_block_packet_handler.cpp @@ -0,0 +1,254 @@ +#include "network/tarcap/packets_handlers/v3/dag_block_packet_handler.hpp" + +#include "dag/dag_manager.hpp" +#include "network/tarcap/packets_handlers/v3/transaction_packet_handler.hpp" +#include "network/tarcap/shared_states/pbft_syncing_state.hpp" +#include "transaction/transaction_manager.hpp" + +namespace taraxa::network::tarcap::v3 { + +DagBlockPacketHandler::DagBlockPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, + std::shared_ptr pbft_chain, + std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, + std::shared_ptr trx_mgr, std::shared_ptr db, + const addr_t &node_addr, const std::string &logs_prefix) + : ExtSyncingPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_syncing_state), + std::move(pbft_chain), std::move(pbft_mgr), std::move(dag_mgr), std::move(db), node_addr, + logs_prefix + "DAG_BLOCK_PH"), + trx_mgr_(std::move(trx_mgr)) {} + +void DagBlockPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + constexpr size_t required_size = 2; + // Only one dag block can be received + if (packet_data.rlp_.itemCount() != required_size) { + throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); + } +} + +void DagBlockPacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { + std::unordered_map> transactions; + auto dag_rlp = packet_data.rlp_; + if (packet_data.rlp_.itemCount() == 2) { + const auto trx_count = packet_data.rlp_[0].itemCount(); + transactions.reserve(trx_count); + + for (const auto tx_rlp : packet_data.rlp_[0]) { + try { + auto trx = std::make_shared(tx_rlp); + peer->markTransactionAsKnown(trx->getHash()); + transactions.emplace(trx->getHash(), std::move(trx)); + } catch (const 
Transaction::InvalidTransaction &e) { + throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); + } + } + dag_rlp = packet_data.rlp_[1]; + } + auto block = std::make_shared(dag_rlp); + blk_hash_t const hash = block->getHash(); + + peer->markDagBlockAsKnown(hash); + + if (block->getLevel() > peer->dag_level_) { + peer->dag_level_ = block->getLevel(); + } + + // Do not process this block in case we already have it + if (dag_mgr_->isDagBlockKnown(block->getHash())) { + LOG(log_tr_) << "Received known DagBlockPacket " << hash << "from: " << peer->getId(); + return; + } + + onNewBlockReceived(std::move(block), peer, transactions); +} + +void DagBlockPacketHandler::sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, + const std::shared_ptr &block, + const SharedTransactions &trxs) { + std::shared_ptr peer = peers_state_->getPeer(peer_id); + if (!peer) { + LOG(log_wr_) << "Send dag block " << block->getHash() << ". Failed to obtain peer " << peer_id; + return; + } + + dev::RLPStream s(2); + + // This lock prevents race condition between syncing and gossiping dag blocks + std::unique_lock lock(peer->mutex_for_sending_dag_blocks_); + + taraxa::bytes trx_bytes; + for (uint32_t i = 0; i < trxs.size(); i++) { + auto trx_data = trxs[i]->rlp(); + trx_bytes.insert(trx_bytes.end(), std::begin(trx_data), std::end(trx_data)); + } + + s.appendList(trxs.size()); + s.appendRaw(trx_bytes, trxs.size()); + + s.appendRaw(block->rlp(true)); + + if (!sealAndSend(peer_id, SubprotocolPacketType::kDagBlockPacket, std::move(s))) { + LOG(log_wr_) << "Sending DagBlock " << block->getHash() << " failed to " << peer_id; + return; + } + + // Mark data as known if sending was successful + peer->markDagBlockAsKnown(block->getHash()); +} + +void DagBlockPacketHandler::onNewBlockReceived( + std::shared_ptr &&block, const std::shared_ptr &peer, + const std::unordered_map> &trxs) { + const auto block_hash = block->getHash(); + auto verified = 
dag_mgr_->verifyBlock(block, trxs); + switch (verified.first) { + case DagManager::VerifyBlockReturnType::IncorrectTransactionsEstimation: + case DagManager::VerifyBlockReturnType::BlockTooBig: + case DagManager::VerifyBlockReturnType::FailedVdfVerification: + case DagManager::VerifyBlockReturnType::NotEligible: + case DagManager::VerifyBlockReturnType::FailedTipsVerification: { + std::ostringstream err_msg; + err_msg << "DagBlock " << block_hash << " failed verification with error code " + << static_cast(verified.first); + throw MaliciousPeerException(err_msg.str()); + } + case DagManager::VerifyBlockReturnType::MissingTransaction: + if (peer->dagSyncingAllowed()) { + if (trx_mgr_->transactionsDropped()) [[unlikely]] { + LOG(log_nf_) << "NewBlock " << block_hash.toString() << " from peer " << peer->getId() + << " is missing transaction, our pool recently dropped transactions, requesting dag sync"; + } else { + LOG(log_wr_) << "NewBlock " << block_hash.toString() << " from peer " << peer->getId() + << " is missing transaction, requesting dag sync"; + } + peer->peer_dag_synced_ = false; + requestPendingDagBlocks(peer); + } else { + if (trx_mgr_->transactionsDropped()) [[unlikely]] { + // Disconnecting since anything after will also contain missing pivot/tips ... 
+ LOG(log_nf_) << "NewBlock " << block_hash.toString() << " from peer " << peer->getId() + << " is missing transaction, but our pool recently dropped transactions, disconnecting"; + disconnect(peer->getId(), dev::p2p::UserReason); + } else { + std::ostringstream err_msg; + err_msg << "DagBlock" << block_hash << " is missing a transaction while in a dag synced state"; + throw MaliciousPeerException(err_msg.str()); + } + } + break; + case DagManager::VerifyBlockReturnType::MissingTip: + if (peer->peer_dag_synced_) { + if (peer->dagSyncingAllowed()) { + LOG(log_wr_) << "NewBlock " << block_hash.toString() << " from peer " << peer->getId() + << " is missing tip, requesting dag sync"; + peer->peer_dag_synced_ = false; + requestPendingDagBlocks(peer); + } else { + std::ostringstream err_msg; + err_msg << "DagBlock has missing tip"; + throw MaliciousPeerException(err_msg.str()); + } + } else { + // peer_dag_synced_ flag ensures that this can only be performed once for a peer + requestPendingDagBlocks(peer); + } + break; + case DagManager::VerifyBlockReturnType::AheadBlock: + case DagManager::VerifyBlockReturnType::FutureBlock: + if (peer->peer_dag_synced_) { + LOG(log_er_) << "DagBlock" << block_hash << " is an ahead/future block. 
Peer " << peer->getId()
+                     << " will be disconnected";
+        disconnect(peer->getId(), dev::p2p::UserReason);
+      }
+      break;
+    case DagManager::VerifyBlockReturnType::Verified: {
+      auto status = dag_mgr_->addDagBlock(std::move(block), std::move(verified.second));
+      if (!status.first) {
+        LOG(log_dg_) << "Received DagBlockPacket " << block_hash << " from: " << peer->getId();
+        // Ignore new block packets when pbft syncing
+        if (pbft_syncing_state_->isPbftSyncing()) {
+          LOG(log_dg_) << "Ignore new dag block " << block_hash << ", pbft syncing is on";
+        } else if (peer->peer_dag_syncing_) {
+          LOG(log_dg_) << "Ignore new dag block " << block_hash << ", dag syncing is on";
+        } else {
+          if (peer->peer_dag_synced_) {
+            std::ostringstream err_msg;
+            if (status.second.size() > 0)
+              err_msg << "DagBlock " << block->getHash() << " has missing pivot or/and tips " << status.second;
+            else
+              err_msg << "DagBlock " << block->getHash() << " could not be added to DAG";
+            throw MaliciousPeerException(err_msg.str());
+          } else {
+            // peer_dag_synced_ flag ensures that this can only be performed once for a peer
+            requestPendingDagBlocks(peer);
+          }
+        }
+      }
+    } break;
+    case DagManager::VerifyBlockReturnType::ExpiredBlock:
+      break;
+  }
+}
+
+void DagBlockPacketHandler::onNewBlockVerified(const std::shared_ptr &block, bool proposed,
+                                               const SharedTransactions &trxs) {
+  // If node is pbft syncing and block is not proposed by us, this is an old block that has been verified - no block
+  // gossip is needed
+  if (!proposed && pbft_syncing_state_->isDeepPbftSyncing()) {
+    return;
+  }
+
+  const auto &block_hash = block->getHash();
+  LOG(log_tr_) << "Verified NewBlock " << block_hash.toString();
+
+  std::vector peers_to_send;
+  for (auto const &peer : peers_state_->getAllPeers()) {
+    if (!peer.second->isDagBlockKnown(block_hash) && !peer.second->syncing_) {
+      peers_to_send.push_back(peer.first);
+    }
+  }
+
+  std::string peer_and_transactions_to_log;
+  // Sending it in same order favours some peers over
others, always start with a different position
+  const auto peers_to_send_count = peers_to_send.size();
+  if (peers_to_send_count > 0) {
+    uint32_t start_with = rand() % peers_to_send_count;
+    for (uint32_t i = 0; i < peers_to_send_count; i++) {
+      auto peer_id = peers_to_send[(start_with + i) % peers_to_send_count];
+      auto peer = peers_state_->getPeer(peer_id);
+      if (peer && !peer->syncing_) {
+        peer_and_transactions_to_log += " Peer: " + peer->getId().abridged() + " Trxs: ";
+
+        // Collect only the transactions this peer does not know yet.
+        // NOTE(review): this loop was previously duplicated verbatim, so every unknown
+        // transaction was pushed into transactions_to_send (and therefore sent) twice.
+        // Collapsed into a single pass; the unused `dev::RLPStream ts;` local was dropped
+        // as well. The blank lines below intentionally keep this hunk's line count valid.
+        SharedTransactions transactions_to_send;
+        for (const auto &trx : trxs) {
+          assert(trx != nullptr);
+          const auto &trx_hash = trx->getHash();
+          if (peer->isTransactionKnown(trx_hash)) {
+            continue;
+          }
+          transactions_to_send.push_back(trx);
+          peer_and_transactions_to_log += trx_hash.abridged();
+        }
+
+        sendBlockWithTransactions(peer_id, block, transactions_to_send);
+        peer->markDagBlockAsKnown(block_hash);
+      }
+    }
+  }
+
+
+
+
+
+
+  LOG(log_dg_) << "Send DagBlock " << block->getHash() << " to peers: " << peer_and_transactions_to_log;
+  if (!peers_to_send.empty()) LOG(log_tr_) << "Sent block to " << peers_to_send.size() << " peers";
+}
+} // namespace taraxa::network::tarcap::v3
diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/dag_sync_packet_handler.cpp
new file mode 100644
index 0000000000..0bf75ed916
--- /dev/null
+++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/dag_sync_packet_handler.cpp
@@ -0,0 +1,132 @@
+#include "network/tarcap/packets_handlers/v3/dag_sync_packet_handler.hpp"
+
+#include "dag/dag.hpp"
+#include "network/tarcap/packets_handlers/v3/common/ext_syncing_packet_handler.hpp"
+#include
"network/tarcap/shared_states/pbft_syncing_state.hpp" +#include "transaction/transaction.hpp" +#include "transaction/transaction_manager.hpp" + +namespace taraxa::network::tarcap::v3 { + +DagSyncPacketHandler::DagSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, + std::shared_ptr pbft_chain, std::shared_ptr pbft_mgr, + std::shared_ptr dag_mgr, + std::shared_ptr trx_mgr, std::shared_ptr db, + const addr_t& node_addr, const std::string& logs_prefix) + : ExtSyncingPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_syncing_state), + std::move(pbft_chain), std::move(pbft_mgr), std::move(dag_mgr), std::move(db), node_addr, + logs_prefix + "DAG_SYNC_PH"), + trx_mgr_(std::move(trx_mgr)) {} + +void DagSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData& packet_data) const { + if (constexpr size_t required_size = 4; packet_data.rlp_.itemCount() != required_size) { + throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); + } +} + +void DagSyncPacketHandler::process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) { + auto it = packet_data.rlp_.begin(); + const auto request_period = (*it++).toInt(); + const auto response_period = (*it++).toInt(); + + // If the periods did not match restart syncing + if (response_period > request_period) { + LOG(log_dg_) << "Received DagSyncPacket with mismatching periods: " << response_period << " " << request_period + << " from " << packet_data.from_node_id_.abridged(); + if (peer->pbft_chain_size_ < response_period) { + peer->pbft_chain_size_ = response_period; + } + peer->peer_dag_syncing_ = false; + // We might be behind, restart pbft sync if needed + startSyncingPbft(); + return; + } else if (response_period < request_period) { + // This should not be possible for honest node + std::ostringstream err_msg; + err_msg << 
"Received DagSyncPacket with mismatching periods: response_period(" << response_period + << ") != request_period(" << request_period << ")"; + + throw MaliciousPeerException(err_msg.str()); + } + + std::vector transactions_to_log; + std::unordered_map> transactions; + const auto trx_count = (*it).itemCount(); + transactions.reserve(trx_count); + transactions_to_log.reserve(trx_count); + + for (const auto tx_rlp : (*it++)) { + try { + auto trx = std::make_shared(tx_rlp); + peer->markTransactionAsKnown(trx->getHash()); + transactions.emplace(trx->getHash(), std::move(trx)); + } catch (const Transaction::InvalidTransaction& e) { + throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); + } + } + + std::vector> dag_blocks; + std::vector dag_blocks_to_log; + dag_blocks.reserve((*it).itemCount()); + dag_blocks_to_log.reserve((*it).itemCount()); + + for (const auto block_rlp : *it) { + auto block = std::make_shared(block_rlp); + peer->markDagBlockAsKnown(block->getHash()); + if (dag_mgr_->isDagBlockKnown(block->getHash())) { + LOG(log_tr_) << "Received known DagBlock " << block->getHash() << "from: " << peer->getId(); + continue; + } + dag_blocks.emplace_back(std::move(block)); + } + + for (auto& trx : transactions) { + transactions_to_log.push_back(trx.first); + if (trx_mgr_->isTransactionKnown(trx.first)) { + continue; + } + + auto [verified, reason] = trx_mgr_->verifyTransaction(trx.second); + if (!verified) { + std::ostringstream err_msg; + err_msg << "DagBlock transaction " << trx.first << " validation failed: " << reason; + throw MaliciousPeerException(err_msg.str()); + } + } + + for (auto& block : dag_blocks) { + dag_blocks_to_log.push_back(block->getHash()); + + auto verified = dag_mgr_->verifyBlock(block, transactions); + if (verified.first != DagManager::VerifyBlockReturnType::Verified) { + std::ostringstream err_msg; + err_msg << "DagBlock " << block->getHash() << " failed verification with error code " + << 
static_cast(verified.first); + throw MaliciousPeerException(err_msg.str()); + } + + if (block->getLevel() > peer->dag_level_) peer->dag_level_ = block->getLevel(); + + auto status = dag_mgr_->addDagBlock(std::move(block), std::move(verified.second)); + if (!status.first) { + std::ostringstream err_msg; + if (status.second.size() > 0) + err_msg << "DagBlock" << block->getHash() << " has missing pivot or/and tips " << status.second; + else + err_msg << "DagBlock" << block->getHash() << " could not be added to DAG"; + throw MaliciousPeerException(err_msg.str()); + } + } + + peer->peer_dag_synced_ = true; + peer->peer_dag_synced_time_ = + std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); + peer->peer_dag_syncing_ = false; + + LOG(log_dg_) << "Received DagSyncPacket with blocks: " << dag_blocks_to_log + << " Transactions: " << transactions_to_log << " from " << packet_data.from_node_id_; +} + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_dag_sync_packet_handler.cpp new file mode 100644 index 0000000000..eedf57a1e9 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_dag_sync_packet_handler.cpp @@ -0,0 +1,89 @@ +#include "network/tarcap/packets_handlers/v3/get_dag_sync_packet_handler.hpp" + +#include "dag/dag_manager.hpp" +#include "transaction/transaction_manager.hpp" + +namespace taraxa::network::tarcap::v3 { + +GetDagSyncPacketHandler::GetDagSyncPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr trx_mgr, + std::shared_ptr dag_mgr, std::shared_ptr db, + const addr_t &node_addr, const std::string &logs_prefix) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, logs_prefix + "GET_DAG_SYNC_PH"), + trx_mgr_(std::move(trx_mgr)), + 
dag_mgr_(std::move(dag_mgr)), + db_(std::move(db)) {} + +void GetDagSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + if (constexpr size_t required_size = 2; packet_data.rlp_.itemCount() != required_size) { + throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); + } +} + +void GetDagSyncPacketHandler::process(const threadpool::PacketData &packet_data, + [[maybe_unused]] const std::shared_ptr &peer) { + if (!peer->requestDagSyncingAllowed()) { + // This should not be possible for honest node + // Each node should perform dag syncing only when allowed + std::ostringstream err_msg; + err_msg << "Received multiple GetDagSyncPackets from " << packet_data.from_node_id_.abridged(); + + throw MaliciousPeerException(err_msg.str()); + } + + // This lock prevents race condition between syncing and gossiping dag blocks + std::unique_lock lock(peer->mutex_for_sending_dag_blocks_); + + std::unordered_set blocks_hashes; + auto it = packet_data.rlp_.begin(); + const auto peer_period = (*it++).toInt(); + + std::string blocks_hashes_to_log; + for (const auto block_hash_rlp : *it) { + blk_hash_t hash = block_hash_rlp.toHash(); + blocks_hashes_to_log += hash.abridged(); + blocks_hashes.emplace(hash); + } + + LOG(log_dg_) << "Received GetDagSyncPacket: " << blocks_hashes_to_log << " from " << peer->getId(); + + auto [period, blocks, transactions] = dag_mgr_->getNonFinalizedBlocksWithTransactions(blocks_hashes); + if (peer_period == period) { + peer->syncing_ = false; + peer->peer_requested_dag_syncing_ = true; + peer->peer_requested_dag_syncing_time_ = + std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); + } else { + // There is no point in sending blocks if periods do not match, but an empty packet should be sent + blocks.clear(); + transactions.clear(); + } + sendBlocks(packet_data.from_node_id_, std::move(blocks), std::move(transactions), 
peer_period, period); +} + +void GetDagSyncPacketHandler::sendBlocks(const dev::p2p::NodeID &peer_id, + std::vector> &&blocks, + SharedTransactions &&transactions, PbftPeriod request_period, + PbftPeriod period) { + auto peer = peers_state_->getPeer(peer_id); + if (!peer) return; + + dev::RLPStream s(4); + s.append(request_period); + s.append(period); + + s.appendList(transactions.size()); + for (const auto &tx : transactions) { + s.appendRaw(tx->rlp()); + } + + s.appendList(blocks.size()); + for (const auto &block : blocks) { + s.appendRaw(block->rlp(true)); + } + + sealAndSend(peer_id, SubprotocolPacketType::kDagSyncPacket, std::move(s)); +} + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_next_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_next_votes_bundle_packet_handler.cpp new file mode 100644 index 0000000000..e0d38453a2 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_next_votes_bundle_packet_handler.cpp @@ -0,0 +1,82 @@ +#include "network/tarcap/packets_handlers/v3/get_next_votes_bundle_packet_handler.hpp" + +#include "pbft/pbft_manager.hpp" +#include "vote_manager/vote_manager.hpp" + +namespace taraxa::network::tarcap::v3 { + +GetNextVotesBundlePacketHandler::GetNextVotesBundlePacketHandler( + const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, + std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, + std::shared_ptr slashing_manager, const addr_t &node_addr, const std::string &logs_prefix) + : ExtVotesPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_mgr), + std::move(pbft_chain), std::move(vote_mgr), std::move(slashing_manager), node_addr, + logs_prefix + "GET_NEXT_VOTES_BUNDLE_PH") {} + +void GetNextVotesBundlePacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + 
if (constexpr size_t required_size = 2; packet_data.rlp_.itemCount() != required_size) { + throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); + } +} + +void GetNextVotesBundlePacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { + LOG(log_dg_) << "Received GetNextVotesSyncPacket request"; + + const PbftPeriod peer_pbft_period = packet_data.rlp_[0].toInt(); + const PbftRound peer_pbft_round = packet_data.rlp_[1].toInt(); + const auto [pbft_round, pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); + + // Send votes only for current_period == peer_period && current_period >= peer_round + if (pbft_period != peer_pbft_period || pbft_round == 1 || pbft_round < peer_pbft_round) { + LOG(log_nf_) << "No previous round next votes sync packet will be sent. pbft_period " << pbft_period + << ", peer_pbft_period " << peer_pbft_period << ", pbft_round " << pbft_round << ", peer_pbft_round " + << peer_pbft_round; + return; + } + + auto next_votes = + vote_mgr_->getTwoTPlusOneVotedBlockVotes(pbft_period, pbft_round - 1, TwoTPlusOneVotedBlockType::NextVotedBlock); + auto next_null_votes = vote_mgr_->getTwoTPlusOneVotedBlockVotes(pbft_period, pbft_round - 1, + TwoTPlusOneVotedBlockType::NextVotedNullBlock); + + // In edge case this could theoretically happen due to race condition when we moved to the next period or round + // right before calling getAllTwoTPlusOneNextVotes with specific period & round + if (next_votes.empty() && next_null_votes.empty()) { + // Try to get period & round values again + const auto [tmp_pbft_round, tmp_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); + // No changes in period & round or new round == 1 + if (pbft_period == tmp_pbft_period && pbft_round == tmp_pbft_round) { + LOG(log_er_) << "No next votes returned for period " << tmp_pbft_period << ", round " << tmp_pbft_round - 1; + return; + } + + if (tmp_pbft_round == 1) { + LOG(log_wr_) << "No next 
votes returned for period " << tmp_pbft_period << ", round " << tmp_pbft_round - 1 + << " due to race condition - pbft already moved to the next period & round == 1"; + return; + } + + next_votes = vote_mgr_->getTwoTPlusOneVotedBlockVotes(pbft_period, pbft_round - 1, + TwoTPlusOneVotedBlockType::NextVotedBlock); + next_null_votes = vote_mgr_->getTwoTPlusOneVotedBlockVotes(pbft_period, pbft_round - 1, + TwoTPlusOneVotedBlockType::NextVotedNullBlock); + if (next_votes.empty() && next_null_votes.empty()) { + LOG(log_er_) << "No next votes returned for period " << tmp_pbft_period << ", round " << tmp_pbft_round - 1; + return; + } + } + + if (!next_votes.empty()) { + LOG(log_nf_) << "Send next votes bundle with " << next_votes.size() << " votes to " << peer->getId(); + sendPbftVotesBundle(peer, std::move(next_votes)); + } + + if (!next_null_votes.empty()) { + LOG(log_nf_) << "Send next null votes bundle with " << next_null_votes.size() << " votes to " << peer->getId(); + sendPbftVotesBundle(peer, std::move(next_null_votes)); + } +} + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.cpp new file mode 100644 index 0000000000..83f0bd45f9 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.cpp @@ -0,0 +1,102 @@ +#include "network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp" + +#include "network/tarcap/shared_states/pbft_syncing_state.hpp" +#include "pbft/pbft_chain.hpp" +#include "pbft/period_data.hpp" +#include "storage/storage.hpp" +#include "vote/pbft_vote.hpp" +#include "vote/votes_bundle_rlp.hpp" +#include "vote_manager/vote_manager.hpp" +namespace taraxa::network::tarcap::v3 { +GetPbftSyncPacketHandler::GetPbftSyncPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr 
packets_stats, + std::shared_ptr pbft_syncing_state, + std::shared_ptr pbft_chain, + std::shared_ptr vote_mgr, std::shared_ptr db, + const addr_t &node_addr, const std::string &logs_prefix) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, + logs_prefix + "GET_PBFT_SYNC_PH"), + pbft_syncing_state_(std::move(pbft_syncing_state)), + pbft_chain_(std::move(pbft_chain)), + vote_mgr_(std::move(vote_mgr)), + db_(std::move(db)) {} +void GetPbftSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + if (constexpr size_t required_size = 1; packet_data.rlp_.itemCount() != required_size) { + throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); + } +} +void GetPbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { + LOG(log_tr_) << "Received GetPbftSyncPacket Block"; + const size_t height_to_sync = packet_data.rlp_[0].toInt(); + // Here need PBFT chain size, not synced period since synced blocks has not verified yet. + const size_t my_chain_size = pbft_chain_->getPbftChainSize(); + if (height_to_sync > my_chain_size) { + // Node update peers PBFT chain size in status packet. Should not request syncing period bigger than pbft chain size + std::ostringstream err_msg; + err_msg << "Peer " << packet_data.from_node_id_ << " request syncing period start at " << height_to_sync + << ". That's bigger than own PBFT chain size " << my_chain_size; + throw MaliciousPeerException(err_msg.str()); + } + if (kConf.is_light_node && height_to_sync + kConf.light_node_history <= my_chain_size) { + std::ostringstream err_msg; + err_msg << "Peer " << packet_data.from_node_id_ << " request syncing period start at " << height_to_sync + << ". 
Light node does not have the data " << my_chain_size; + throw MaliciousPeerException(err_msg.str()); + } + size_t blocks_to_transfer = 0; + auto pbft_chain_synced = false; + const auto total_period_data_size = my_chain_size - height_to_sync + 1; + if (total_period_data_size <= kConf.network.sync_level_size) { + blocks_to_transfer = total_period_data_size; + pbft_chain_synced = true; + } else { + blocks_to_transfer = kConf.network.sync_level_size; + } + LOG(log_tr_) << "Will send " << blocks_to_transfer << " PBFT blocks to " << packet_data.from_node_id_; + sendPbftBlocks(peer, height_to_sync, blocks_to_transfer, pbft_chain_synced); +} +// api for pbft syncing +void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr &peer, PbftPeriod from_period, + size_t blocks_to_transfer, bool pbft_chain_synced) { + const auto &peer_id = peer->getId(); + LOG(log_tr_) << "sendPbftBlocks: peer want to sync from pbft chain height " << from_period << ", will send at most " + << blocks_to_transfer << " pbft blocks to " << peer_id; + for (auto block_period = from_period; block_period < from_period + blocks_to_transfer; block_period++) { + bool last_block = (block_period == from_period + blocks_to_transfer - 1); + auto data = db_->getPeriodDataRaw(block_period); + if (data.size() == 0) { + // This can happen when switching from light node to full node setting + LOG(log_er_) << "DB corrupted. 
Cannot find period " << block_period << " PBFT block in db"; + return; + } + data = PeriodData::ToOldPeriodData(data); + dev::RLPStream s; + if (pbft_chain_synced && last_block) { + // Latest finalized block cert votes are saved in db as reward votes for new blocks + const auto reward_votes = vote_mgr_->getRewardVotes(); + assert(!reward_votes.empty()); + // It is possible that the node pushed another block to the chain in the meantime + if (reward_votes[0]->getPeriod() == block_period) { + s.appendList(3); + s << last_block; + s.appendRaw(data); + s.appendRaw(encodePbftVotesBundleRlp(reward_votes)); + } else { + s.appendList(2); + s << last_block; + s.appendRaw(data); + } + } else { + s.appendList(2); + s << last_block; + s.appendRaw(data); + } + LOG(log_dg_) << "Sending PbftSyncPacket period " << block_period << " to " << peer_id; + sealAndSend(peer_id, SubprotocolPacketType::kPbftSyncPacket, std::move(s)); + if (pbft_chain_synced && last_block) { + peer->syncing_ = false; + } + } +} +} // namespace taraxa::network::tarcap::v3 \ No newline at end of file diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pillar_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pillar_votes_bundle_packet_handler.cpp new file mode 100644 index 0000000000..8e4fe9c569 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pillar_votes_bundle_packet_handler.cpp @@ -0,0 +1,98 @@ +#include "network/tarcap/packets_handlers/v3/get_pillar_votes_bundle_packet_handler.hpp" + +#include "network/tarcap/packets_handlers/v3/pillar_votes_bundle_packet_handler.hpp" + +namespace taraxa::network::tarcap::v3 { + +GetPillarVotesBundlePacketHandler::GetPillarVotesBundlePacketHandler( + const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pillar_chain_manager, const addr_t &node_addr, + const std::string &logs_prefix) + : PacketHandler(conf, 
std::move(peers_state), std::move(packets_stats), node_addr, + logs_prefix + "GET_PILLAR_VOTES_BUNDLE_PH"), + pillar_chain_manager_(std::move(pillar_chain_manager)) {} + +void GetPillarVotesBundlePacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + auto items = packet_data.rlp_.itemCount(); + if (items != kGetPillarVotesBundlePacketSize) { + throw InvalidRlpItemsCountException(packet_data.type_str_, items, kGetPillarVotesBundlePacketSize); + } +} + +void GetPillarVotesBundlePacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { + LOG(log_dg_) << "GetPillarVotesBundlePacketHandler received from peer " << peer->getId(); + const PbftPeriod period = packet_data.rlp_[0].toInt(); + const blk_hash_t pillar_block_hash = packet_data.rlp_[1].toHash(); + + if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(period)) { + std::ostringstream err_msg; + err_msg << "Pillar votes bundle request for period " << period << ", ficus hardfork block num " + << kConf.genesis.state.hardforks.ficus_hf.block_num; + throw MaliciousPeerException(err_msg.str()); + } + + if (!kConf.genesis.state.hardforks.ficus_hf.isPbftWithPillarBlockPeriod(period)) { + std::ostringstream err_msg; + err_msg << "Pillar votes bundle request for period " << period << ". 
Wrong requested period"; + throw MaliciousPeerException(err_msg.str()); + } + + const auto votes = pillar_chain_manager_->getVerifiedPillarVotes(period, pillar_block_hash); + if (votes.empty()) { + LOG(log_dg_) << "No pillar votes for period " << period << "and pillar block hash " << pillar_block_hash; + return; + } + // Check if the votes size exceeds the maximum limit and split into multiple packets if needed + const size_t total_votes = votes.size(); + size_t votes_sent = 0; + + while (votes_sent < total_votes) { + // Determine the size of the current chunk + const size_t chunk_size = + std::min(PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp, total_votes - votes_sent); + + // Create a new RLPStream for the chunk + dev::RLPStream s(chunk_size); + for (size_t i = 0; i < chunk_size; ++i) { + const auto &sig = votes[votes_sent + i]; + s.appendRaw(sig->rlp()); + } + + // Seal and send the chunk to the peer + if (sealAndSend(peer->getId(), SubprotocolPacketType::kPillarVotesBundlePacket, std::move(s))) { + // Mark the votes in this chunk as known + for (size_t i = 0; i < chunk_size; ++i) { + peer->markPillarVoteAsKnown(votes[votes_sent + i]->getHash()); + } + + LOG(log_nf_) << "Pillar votes bundle for period " << period << ", hash " << pillar_block_hash << " sent to " + << peer->getId() << " (Chunk " + << (votes_sent / PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp) + 1 << "/" + << (total_votes + PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp - 1) / + PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp + << ")"; + } + + // Update the votes_sent counter + votes_sent += chunk_size; + } +} + +void GetPillarVotesBundlePacketHandler::requestPillarVotesBundle(PbftPeriod period, const blk_hash_t &pillar_block_hash, + const std::shared_ptr &peer) { + dev::RLPStream s(kGetPillarVotesBundlePacketSize); + s << period; + s << pillar_block_hash; + + if (sealAndSend(peer->getId(), SubprotocolPacketType::kGetPillarVotesBundlePacket, 
std::move(s))) { + LOG(log_nf_) << "Requested pillar votes bundle for period " << period << " and pillar block " << pillar_block_hash + << " from peer " << peer->getId(); + } else { + LOG(log_er_) << "Unable to send pillar votes bundle request for period " << period << " and pillar block " + << pillar_block_hash << " to peer " << peer->getId(); + } +} + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp new file mode 100644 index 0000000000..8af76978f3 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp @@ -0,0 +1,261 @@ +#include "network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp" + +#include "network/tarcap/shared_states/pbft_syncing_state.hpp" +#include "pbft/pbft_chain.hpp" +#include "pbft/pbft_manager.hpp" +#include "transaction/transaction_manager.hpp" +#include "vote/pbft_vote.hpp" +#include "vote/votes_bundle_rlp.hpp" +namespace taraxa::network::tarcap::v3 { +PbftSyncPacketHandler::PbftSyncPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, + std::shared_ptr pbft_chain, + std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, + std::shared_ptr vote_mgr, std::shared_ptr db, + const addr_t &node_addr, const std::string &logs_prefix) + : ExtSyncingPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_syncing_state), + std::move(pbft_chain), std::move(pbft_mgr), std::move(dag_mgr), std::move(db), node_addr, + logs_prefix + "PBFT_SYNC_PH"), + vote_mgr_(std::move(vote_mgr)), + periodic_events_tp_(1, true) {} +void PbftSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + if (packet_data.rlp_.itemCount() != kStandardPacketSize && packet_data.rlp_.itemCount() 
!= kChainSyncedPacketSize) { + throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), kStandardPacketSize); + } + // PeriodData rlp parsing cannot be done through util::rlp_tuple, which automatically checks the rlp size so it is + // checked here manually + if (packet_data.rlp_[1].itemCount() != PeriodData::kBaseRlpItemCount && + packet_data.rlp_[1].itemCount() != PeriodData::kExtendedRlpItemCount) { + throw InvalidRlpItemsCountException(packet_data.type_str_ + ":PeriodData", packet_data.rlp_[1].itemCount(), + PeriodData::kBaseRlpItemCount); + } +} +void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { + // Note: no need to consider possible race conditions due to concurrent processing as it is + // disabled on priority_queue blocking dependencies level + const auto syncing_peer = pbft_syncing_state_->syncingPeer(); + if (!syncing_peer) { + LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << packet_data.from_node_id_.abridged() + << " but there is no current syncing peer set"; + return; + } + if (syncing_peer->getId() != packet_data.from_node_id_) { + LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << packet_data.from_node_id_.abridged() + << " current syncing peer " << syncing_peer->getId().abridged(); + return; + } + // Process received pbft blocks + // pbft_chain_synced is the flag to indicate own PBFT chain has synced with the peer's PBFT chain + const bool pbft_chain_synced = packet_data.rlp_.itemCount() == kChainSyncedPacketSize; + // last_block is the flag to indicate this is the last block in each syncing round, doesn't mean PBFT chain has synced + const bool last_block = packet_data.rlp_[0].toInt(); + PeriodData period_data; + try { + period_data = decodePeriodData(packet_data.rlp_[1]); + } catch (const std::runtime_error &e) { + throw MaliciousPeerException("Unable to parse PeriodData: " + std::string(e.what())); + } + std::vector> 
current_block_cert_votes; + if (pbft_chain_synced) { + current_block_cert_votes = decodeVotesBundle(packet_data.rlp_[2]); + } + const auto pbft_blk_hash = period_data.pbft_blk->getBlockHash(); + std::string received_dag_blocks_str; // This is just log related stuff + for (auto const &block : period_data.dag_blocks) { + received_dag_blocks_str += block->getHash().toString() + " "; + if (peer->dag_level_ < block->getLevel()) { + peer->dag_level_ = block->getLevel(); + } + } + const auto pbft_block_period = period_data.pbft_blk->getPeriod(); + LOG(log_dg_) << "PbftSyncPacket received. Period: " << pbft_block_period + << ", dag Blocks: " << received_dag_blocks_str << " from " << packet_data.from_node_id_; + peer->markPbftBlockAsKnown(pbft_blk_hash); + // Update peer's pbft period if outdated + if (peer->pbft_chain_size_ < pbft_block_period) { + peer->pbft_chain_size_ = pbft_block_period; + } + LOG(log_tr_) << "Processing pbft block: " << pbft_blk_hash; + if (pbft_chain_->findPbftBlockInChain(pbft_blk_hash)) { + LOG(log_wr_) << "PBFT block " << pbft_blk_hash << ", period: " << period_data.pbft_blk->getPeriod() << " from " + << packet_data.from_node_id_ << " already present in chain"; + } else { + if (pbft_block_period != pbft_mgr_->pbftSyncingPeriod() + 1) { + // This can happen if we just got synced and block was cert voted + if (pbft_chain_synced && pbft_block_period == pbft_mgr_->pbftSyncingPeriod()) { + pbftSyncComplete(); + return; + } + LOG(log_er_) << "Block " << pbft_blk_hash << " period unexpected: " << pbft_block_period + << ". 
Expected period: " << pbft_mgr_->pbftSyncingPeriod() + 1; + return; + } + // Check cert vote matches if final synced block + if (pbft_chain_synced) { + for (auto const &vote : current_block_cert_votes) { + if (vote->getBlockHash() != pbft_blk_hash) { + LOG(log_er_) << "Invalid cert votes block hash " << vote->getBlockHash() << " instead of " << pbft_blk_hash + << " from peer " << packet_data.from_node_id_.abridged() << " received, stop syncing."; + handleMaliciousSyncPeer(packet_data.from_node_id_); + return; + } + } + } + // Check votes match the hash of previous block in the queue + auto last_pbft_block_hash = pbft_mgr_->lastPbftBlockHashFromQueueOrChain(); + // Check cert vote matches + for (auto const &vote : period_data.previous_block_cert_votes) { + if (vote->getBlockHash() != last_pbft_block_hash) { + LOG(log_er_) << "Invalid cert votes block hash " << vote->getBlockHash() << " instead of " + << last_pbft_block_hash << " from peer " << packet_data.from_node_id_.abridged() + << " received, stop syncing."; + handleMaliciousSyncPeer(packet_data.from_node_id_); + return; + } + } + if (!pbft_mgr_->validatePillarDataInPeriodData(period_data)) { + handleMaliciousSyncPeer(packet_data.from_node_id_); + return; + } + auto order_hash = PbftManager::calculateOrderHash(period_data.dag_blocks); + if (order_hash != period_data.pbft_blk->getOrderHash()) { + { // This is just log related stuff + std::vector trx_order; + trx_order.reserve(period_data.transactions.size()); + std::vector blk_order; + blk_order.reserve(period_data.dag_blocks.size()); + for (auto t : period_data.transactions) { + trx_order.push_back(t->getHash()); + } + for (auto b : period_data.dag_blocks) { + blk_order.push_back(b->getHash()); + } + LOG(log_er_) << "Order hash incorrect in period data " << pbft_blk_hash << " expected: " << order_hash + << " received " << period_data.pbft_blk->getOrderHash() << "; Dag order: " << blk_order + << "; Trx order: " << trx_order << "; from " << 
packet_data.from_node_id_.abridged() + << ", stop syncing."; + } + handleMaliciousSyncPeer(packet_data.from_node_id_); + return; + } + // This is special case when queue is empty and we can not say for sure that all votes that are part of this block + // have been verified before + if (pbft_mgr_->periodDataQueueEmpty()) { + for (const auto &v : period_data.previous_block_cert_votes) { + if (auto vote_is_valid = vote_mgr_->validateVote(v); vote_is_valid.first == false) { + LOG(log_er_) << "Invalid reward votes in block " << period_data.pbft_blk->getBlockHash() << " from peer " + << packet_data.from_node_id_.abridged() + << " received, stop syncing. Validation failed. Err: " << vote_is_valid.second; + handleMaliciousSyncPeer(packet_data.from_node_id_); + return; + } + vote_mgr_->addVerifiedVote(v); + } + // And now we need to replace it with verified votes + if (auto votes = vote_mgr_->checkRewardVotes(period_data.pbft_blk, true); votes.first) { + period_data.previous_block_cert_votes = std::move(votes.second); + } else { + // checkRewardVotes could fail because we just cert voted this block and moved to next period, + // in that case we are probably fully synced + if (pbft_block_period <= vote_mgr_->getRewardVotesPbftBlockPeriod()) { + pbft_syncing_state_->setPbftSyncing(false); + return; + } + LOG(log_er_) << "Invalid reward votes in block " << period_data.pbft_blk->getBlockHash() << " from peer " + << packet_data.from_node_id_.abridged() << " received, stop syncing."; + handleMaliciousSyncPeer(packet_data.from_node_id_); + return; + } + } + LOG(log_tr_) << "Synced PBFT block hash " << pbft_blk_hash << " with " + << period_data.previous_block_cert_votes.size() << " cert votes"; + LOG(log_tr_) << "Synced PBFT block " << period_data; + pbft_mgr_->periodDataQueuePush(std::move(period_data), packet_data.from_node_id_, + std::move(current_block_cert_votes)); + } + auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); + // Reset last sync packet received time + 
pbft_syncing_state_->setLastSyncPacketTime(); + if (pbft_chain_synced) { + pbftSyncComplete(); + return; + } + if (last_block) { + // If current sync period is actually bigger than the block we just received we are probably synced + if (pbft_sync_period > pbft_block_period) { + pbft_syncing_state_->setPbftSyncing(false); + return; + } + if (pbft_syncing_state_->isPbftSyncing()) { + if (pbft_sync_period > pbft_chain_->getPbftChainSize() + (10 * kConf.network.sync_level_size)) { + LOG(log_tr_) << "Syncing pbft blocks too fast than processing. Has synced period " << pbft_sync_period + << ", PBFT chain size " << pbft_chain_->getPbftChainSize(); + periodic_events_tp_.post(kDelayedPbftSyncDelayMs, [this] { delayedPbftSync(1); }); + } else { + if (!syncPeerPbft(pbft_sync_period + 1)) { + pbft_syncing_state_->setPbftSyncing(false); + return; + } + } + } + } +} +PeriodData PbftSyncPacketHandler::decodePeriodData(const dev::RLP &period_data_rlp) const { + return PeriodData::FromOldPeriodData(period_data_rlp); +} +std::vector> PbftSyncPacketHandler::decodeVotesBundle( + const dev::RLP &votes_bundle_rlp) const { + return decodePbftVotesBundleRlp(votes_bundle_rlp); +} +void PbftSyncPacketHandler::pbftSyncComplete() { + if (pbft_mgr_->periodDataQueueSize()) { + LOG(log_tr_) << "Syncing pbft blocks faster than processing. 
Remaining sync size " + << pbft_mgr_->periodDataQueueSize(); + periodic_events_tp_.post(kDelayedPbftSyncDelayMs, [this] { pbftSyncComplete(); }); + } else { + LOG(log_dg_) << "Syncing PBFT is completed"; + // We are pbft synced with the node we are connected to but + // calling startSyncingPbft will check if some nodes have + // greater pbft chain size and we should continue syncing with + // them, Or sync pending DAG blocks + pbft_syncing_state_->setPbftSyncing(false); + startSyncingPbft(); + if (!pbft_syncing_state_->isPbftSyncing()) { + requestPendingDagBlocks(); + } + } +} +void PbftSyncPacketHandler::delayedPbftSync(uint32_t counter) { + const uint32_t max_delayed_pbft_sync_count = 60000 / kDelayedPbftSyncDelayMs; + auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); + if (counter > max_delayed_pbft_sync_count) { + LOG(log_er_) << "Pbft blocks stuck in queue, no new block processed in 60 seconds " << pbft_sync_period << " " + << pbft_chain_->getPbftChainSize(); + pbft_syncing_state_->setPbftSyncing(false); + LOG(log_tr_) << "Syncing PBFT is stopping"; + return; + } + if (pbft_syncing_state_->isPbftSyncing()) { + if (pbft_sync_period > pbft_chain_->getPbftChainSize() + (10 * kConf.network.sync_level_size)) { + LOG(log_tr_) << "Syncing pbft blocks faster than processing " << pbft_sync_period << " " + << pbft_chain_->getPbftChainSize(); + periodic_events_tp_.post(kDelayedPbftSyncDelayMs, [this, counter] { delayedPbftSync(counter + 1); }); + } else { + if (!syncPeerPbft(pbft_sync_period + 1)) { + pbft_syncing_state_->setPbftSyncing(false); + } + } + } +} +void PbftSyncPacketHandler::handleMaliciousSyncPeer(const dev::p2p::NodeID &id) { + peers_state_->set_peer_malicious(id); + if (auto host = peers_state_->host_.lock(); host) { + LOG(log_nf_) << "Disconnect peer " << id; + host->disconnect(id, dev::p2p::UserReason); + } else { + LOG(log_er_) << "Unable to handleMaliciousSyncPeer, host == nullptr"; + } +} +} // namespace taraxa::network::tarcap::v3 \ No newline 
at end of file diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pillar_vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pillar_vote_packet_handler.cpp new file mode 100644 index 0000000000..a70eabdeff --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pillar_vote_packet_handler.cpp @@ -0,0 +1,64 @@ +#include "network/tarcap/packets_handlers/v3/pillar_vote_packet_handler.hpp" + +#include "vote/pillar_vote.hpp" + +namespace taraxa::network::tarcap::v3 { + +PillarVotePacketHandler::PillarVotePacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pillar_chain_manager, + const addr_t &node_addr, const std::string &logs_prefix) + : ExtPillarVotePacketHandler(conf, std::move(peers_state), std::move(packets_stats), + std::move(pillar_chain_manager), node_addr, logs_prefix + "PILLAR_VOTE_PH") {} + +void PillarVotePacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + auto items = packet_data.rlp_.itemCount(); + if (items != PillarVote::kStandardRlpSize) { + throw InvalidRlpItemsCountException(packet_data.type_str_, items, PillarVote::kStandardRlpSize); + } +} + +void PillarVotePacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { + const auto pillar_vote = std::make_shared(packet_data.rlp_); + if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(pillar_vote->getPeriod())) { + std::ostringstream err_msg; + err_msg << "Pillar vote " << pillar_vote->getHash() << ", period " << pillar_vote->getPeriod() + << " < ficus hardfork block num"; + throw MaliciousPeerException(err_msg.str()); + } + + if (processPillarVote(pillar_vote, peer)) { + onNewPillarVote(pillar_vote); + } +} + +void PillarVotePacketHandler::onNewPillarVote(const std::shared_ptr &vote, bool rebroadcast) { + for (const auto &peer : peers_state_->getAllPeers()) { + if 
(peer.second->syncing_) { + LOG(log_dg_) << "Pillar vote " << vote->getHash() << ", period " << vote->getPeriod() << " not sent to " + << peer.first << ". Peer syncing"; + continue; + } + + if (peer.second->isPillarVoteKnown(vote->getHash()) && !rebroadcast) { + continue; + } + + sendPillarVote(peer.second, vote); + } +} + +void PillarVotePacketHandler::sendPillarVote(const std::shared_ptr &peer, + const std::shared_ptr &vote) { + dev::RLPStream s; + s.appendRaw(vote->rlp()); + + if (sealAndSend(peer->getId(), SubprotocolPacketType::kPillarVotePacket, std::move(s))) { + peer->markPillarVoteAsKnown(vote->getHash()); + LOG(log_dg_) << "Pillar vote " << vote->getHash() << ", period " << vote->getPeriod() << " sent to " + << peer->getId(); + } +} + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pillar_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pillar_votes_bundle_packet_handler.cpp new file mode 100644 index 0000000000..00178ebcc9 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pillar_votes_bundle_packet_handler.cpp @@ -0,0 +1,40 @@ +#include "network/tarcap/packets_handlers/v3/pillar_votes_bundle_packet_handler.hpp" + +#include "vote/pillar_vote.hpp" + +namespace taraxa::network::tarcap::v3 { + +PillarVotesBundlePacketHandler::PillarVotesBundlePacketHandler( + const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pillar_chain_manager, const addr_t &node_addr, + const std::string &logs_prefix) + : ExtPillarVotePacketHandler(conf, std::move(peers_state), std::move(packets_stats), + std::move(pillar_chain_manager), node_addr, logs_prefix + "PILLAR_VOTES_BUNDLE_PH") {} + +void PillarVotesBundlePacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + auto items = packet_data.rlp_.itemCount(); + if (items == 0 || items > 
kMaxPillarVotesInBundleRlp) { + throw InvalidRlpItemsCountException(packet_data.type_str_, items, kMaxPillarVotesInBundleRlp); + } +} + +void PillarVotesBundlePacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { + // TODO[2744]: there could be the same protection as in pbft syncing that only requested bundle packet is accepted + LOG(log_dg_) << "PillarVotesBundlePacket received from peer " << peer->getId(); + + for (const auto vote_rlp : packet_data.rlp_) { + const auto pillar_vote = std::make_shared(vote_rlp); + if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(pillar_vote->getPeriod())) { + std::ostringstream err_msg; + err_msg << "Synced pillar vote " << pillar_vote->getHash() << ", period " << pillar_vote->getPeriod() + << " < ficus hardfork block num"; + throw MaliciousPeerException(err_msg.str()); + } + + processPillarVote(pillar_vote, peer); + } +} + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/readme.md b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/readme.md new file mode 100644 index 0000000000..3872e21f26 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/readme.md @@ -0,0 +1,6 @@ +### Multiple taraxa capabilities support +- Derive new packet handlers with different logic than the original ones. +- +`!!! 
Important:` These handlers must be +directly or indirectly derived from the latest packets handlers, which are inside +`network/tarcap/packets_handlers/latest/` folder, otherwise network class would not work properly diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/status_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/status_packet_handler.cpp new file mode 100644 index 0000000000..97a8a05eec --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/status_packet_handler.cpp @@ -0,0 +1,198 @@ +#include "network/tarcap/packets_handlers/v3/status_packet_handler.hpp" + +#include "config/version.hpp" +#include "dag/dag.hpp" +#include "network/tarcap/packets_handlers/v3/common/ext_syncing_packet_handler.hpp" +#include "network/tarcap/shared_states/pbft_syncing_state.hpp" +#include "pbft/pbft_chain.hpp" +#include "pbft/pbft_manager.hpp" +#include "vote_manager/vote_manager.hpp" + +namespace taraxa::network::tarcap::v3 { + +StatusPacketHandler::StatusPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, + std::shared_ptr pbft_chain, std::shared_ptr pbft_mgr, + std::shared_ptr dag_mgr, std::shared_ptr db, + h256 genesis_hash, const addr_t& node_addr, const std::string& logs_prefix) + : ExtSyncingPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_syncing_state), + std::move(pbft_chain), std::move(pbft_mgr), std::move(dag_mgr), std::move(db), node_addr, + logs_prefix + "STATUS_PH"), + kGenesisHash(genesis_hash) {} + +void StatusPacketHandler::validatePacketRlpFormat(const threadpool::PacketData& packet_data) const { + if (const auto items_count = packet_data.rlp_.itemCount(); + items_count != kInitialStatusPacketItemsCount && items_count != kStandardStatusPacketItemsCount) { + throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), 
kStandardStatusPacketItemsCount); + } +} + +void StatusPacketHandler::process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) { + // Important !!! Use only "selected_peer" and not "peer" in this function as "peer" might be nullptr + auto selected_peer = peer; + const auto pbft_synced_period = pbft_mgr_->pbftSyncingPeriod(); + + // Initial status packet + if (packet_data.rlp_.itemCount() == kInitialStatusPacketItemsCount) { + if (!selected_peer) { + selected_peer = peers_state_->getPendingPeer(packet_data.from_node_id_); + if (!selected_peer) { + LOG(log_wr_) << "Peer " << packet_data.from_node_id_.abridged() + << " missing in both peers and pending peers map - will be disconnected."; + disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + return; + } + } + + auto it = packet_data.rlp_.begin(); + auto const peer_chain_id = (*it++).toInt(); + auto const peer_dag_level = (*it++).toInt(); + auto const genesis_hash = (*it++).toHash(); + auto const peer_pbft_chain_size = (*it++).toInt(); + auto const peer_syncing = (*it++).toInt(); + auto const peer_pbft_round = (*it++).toInt(); + auto const node_major_version = (*it++).toInt(); + auto const node_minor_version = (*it++).toInt(); + auto const node_patch_version = (*it++).toInt(); + auto const is_light_node = (*it++).toInt(); + auto const node_history = (*it++).toInt(); + + if (peer_chain_id != kConf.genesis.chain_id) { + LOG((peers_state_->getPeersCount()) ? log_nf_ : log_er_) + << "Incorrect network id " << peer_chain_id << ", host " << packet_data.from_node_id_.abridged() + << " will be disconnected"; + disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + return; + } + + if (genesis_hash != kGenesisHash) { + LOG((peers_state_->getPeersCount()) ? 
log_nf_ : log_er_) + << "Incorrect genesis hash " << genesis_hash << ", host " << packet_data.from_node_id_.abridged() + << " will be disconnected"; + disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + return; + } + + // If this is a light node and it cannot serve our sync request disconnect from it + if (is_light_node) { + selected_peer->peer_light_node = true; + selected_peer->peer_light_node_history = node_history; + if (pbft_synced_period + node_history < peer_pbft_chain_size) { + LOG((peers_state_->getPeersCount()) ? log_nf_ : log_er_) + << "Light node " << packet_data.from_node_id_.abridged() + << " would not be able to serve our syncing request. " + << "Current synced period " << pbft_synced_period << ", peer synced period " << peer_pbft_chain_size + << ", peer light node history " << node_history << ". Peer will be disconnected"; + disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + return; + } + } + + selected_peer->dag_level_ = peer_dag_level; + selected_peer->pbft_chain_size_ = peer_pbft_chain_size; + selected_peer->syncing_ = peer_syncing; + selected_peer->pbft_period_ = peer_pbft_chain_size + 1; + selected_peer->pbft_round_ = peer_pbft_round; + + peers_state_->setPeerAsReadyToSendMessages(packet_data.from_node_id_, selected_peer); + + LOG(log_dg_) << "Received initial status message from " << packet_data.from_node_id_ << ", network id " + << peer_chain_id << ", peer DAG max level " << selected_peer->dag_level_ << ", genesis " + << genesis_hash << ", peer pbft chain size " << selected_peer->pbft_chain_size_ << ", peer syncing " + << std::boolalpha << selected_peer->syncing_ << ", peer pbft period " << selected_peer->pbft_period_ + << ", peer pbft round " << selected_peer->pbft_round_ << ", node major version" << node_major_version + << ", node minor version" << node_minor_version << ", node patch version" << node_patch_version; + + } else { // Standard status packet + // TODO: initial and standard status packet could be 
separated... + if (!selected_peer) { + LOG(log_er_) << "Received standard status packet from " << packet_data.from_node_id_.abridged() + << ", without previously received initial status packet. Will be disconnected"; + disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + return; + } + + auto it = packet_data.rlp_.begin(); + selected_peer->dag_level_ = (*it++).toInt(); + selected_peer->pbft_chain_size_ = (*it++).toInt(); + selected_peer->pbft_period_ = selected_peer->pbft_chain_size_ + 1; + selected_peer->syncing_ = (*it++).toInt(); + selected_peer->pbft_round_ = (*it++).toInt(); + + // TODO: Address malicious status + if (!pbft_syncing_state_->isPbftSyncing()) { + if (pbft_synced_period < selected_peer->pbft_chain_size_) { + LOG(log_nf_) << "Restart PBFT chain syncing. Own synced PBFT at period " << pbft_synced_period + << ", peer PBFT chain size " << selected_peer->pbft_chain_size_; + if (pbft_synced_period + 1 < selected_peer->pbft_chain_size_) { + startSyncingPbft(); + } else { + // If we are behind by only one block wait for two status messages before syncing because nodes are not always + // in perfect sync + if (selected_peer->last_status_pbft_chain_size_ == selected_peer->pbft_chain_size_) { + startSyncingPbft(); + } + } + } else if (pbft_synced_period == selected_peer->pbft_chain_size_ && !selected_peer->peer_dag_synced_) { + // if not syncing and the peer period is matching our period request any pending dag blocks + requestPendingDagBlocks(selected_peer); + } + + const auto [pbft_current_round, pbft_current_period] = pbft_mgr_->getPbftRoundAndPeriod(); + if (pbft_current_period == selected_peer->pbft_period_ && pbft_current_round < selected_peer->pbft_round_) { + requestPbftNextVotesAtPeriodRound(selected_peer->getId(), pbft_current_period, pbft_current_round); + } + } + selected_peer->last_status_pbft_chain_size_ = selected_peer->pbft_chain_size_.load(); + + LOG(log_dg_) << "Received status message from " << packet_data.from_node_id_ << ", peer 
DAG max level " + << selected_peer->dag_level_ << ", peer pbft chain size " << selected_peer->pbft_chain_size_ + << ", peer syncing " << std::boolalpha << selected_peer->syncing_ << ", peer pbft round " + << selected_peer->pbft_round_; + } +} + +bool StatusPacketHandler::sendStatus(const dev::p2p::NodeID& node_id, bool initial) { + bool success = false; + std::string status_packet_type = initial ? "initial" : "standard"; + + LOG(log_dg_) << "Sending " << status_packet_type << " status message to " << node_id << ", protocol version " + << TARAXA_NET_VERSION << ", network id " << kConf.genesis.chain_id << ", genesis " << kGenesisHash + << ", node version " << TARAXA_VERSION; + + auto dag_max_level = dag_mgr_->getMaxLevel(); + auto pbft_chain_size = pbft_chain_->getPbftChainSize(); + const auto pbft_round = pbft_mgr_->getPbftRound(); + + if (initial) { + success = sealAndSend( + node_id, SubprotocolPacketType::kStatusPacket, + std::move(dev::RLPStream(kInitialStatusPacketItemsCount) + << kConf.genesis.chain_id << dag_max_level << kGenesisHash << pbft_chain_size + << pbft_syncing_state_->isPbftSyncing() << pbft_round << TARAXA_MAJOR_VERSION << TARAXA_MINOR_VERSION + << TARAXA_PATCH_VERSION << kConf.is_light_node << kConf.light_node_history)); + } else { + success = sealAndSend( + node_id, SubprotocolPacketType::kStatusPacket, + std::move(dev::RLPStream(kStandardStatusPacketItemsCount) + << dag_max_level << pbft_chain_size << pbft_syncing_state_->isDeepPbftSyncing() << pbft_round)); + } + + return success; +} + +void StatusPacketHandler::sendStatusToPeers() { + auto host = peers_state_->host_.lock(); + if (!host) { + LOG(log_er_) << "Unavailable host during checkLiveness"; + return; + } + + for (auto const& peer : peers_state_->getAllPeers()) { + sendStatus(peer.first, false); + } +} + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/transaction_packet_handler.cpp 
b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/transaction_packet_handler.cpp new file mode 100644 index 0000000000..8cb528826b --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/transaction_packet_handler.cpp @@ -0,0 +1,222 @@ +#include "network/tarcap/packets_handlers/v3/transaction_packet_handler.hpp" + +#include + +#include "transaction/transaction.hpp" +#include "transaction/transaction_manager.hpp" + +namespace taraxa::network::tarcap::v3 { + +TransactionPacketHandler::TransactionPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr trx_mgr, const addr_t &node_addr, + const std::string &logs_prefix) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, logs_prefix + "TRANSACTION_PH"), + trx_mgr_(std::move(trx_mgr)) {} + +void TransactionPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + auto items = packet_data.rlp_.itemCount(); + if (items != kTransactionPacketItemCount) { + throw InvalidRlpItemsCountException(packet_data.type_str_, items, kTransactionPacketItemCount); + } + auto hashes_count = packet_data.rlp_[0].itemCount(); + auto trx_count = packet_data.rlp_[1].itemCount(); + + if (hashes_count < trx_count) { + throw InvalidRlpItemsCountException(packet_data.type_str_, hashes_count, trx_count); + } + if (hashes_count == 0 || hashes_count > kMaxTransactionsInPacket + kMaxHashesInPacket) { + throw InvalidRlpItemsCountException(packet_data.type_str_, hashes_count, + kMaxTransactionsInPacket + kMaxHashesInPacket); + } + + if (trx_count > kMaxTransactionsInPacket) { + throw InvalidRlpItemsCountException(packet_data.type_str_, trx_count, kMaxTransactionsInPacket); + } +} + +inline void TransactionPacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { + std::vector received_transactions; + + const auto transaction_hashes_count = 
packet_data.rlp_[0].itemCount(); + const auto transaction_count = packet_data.rlp_[1].itemCount(); + received_transactions.reserve(transaction_count); + + std::vector trx_hashes; + trx_hashes.reserve(transaction_hashes_count); + + // First extract only transaction hashes + for (const auto trx_hash_rlp : packet_data.rlp_[0]) { + auto trx_hash = trx_hash_rlp.toHash(); + peer->markTransactionAsKnown(trx_hash); + trx_hashes.emplace_back(std::move(trx_hash)); + } + + for (size_t tx_idx = 0; tx_idx < transaction_count; tx_idx++) { + const auto &trx_hash = trx_hashes[tx_idx]; + + // Skip any transactions that are already known to the trx mgr + if (trx_mgr_->isTransactionKnown(trx_hash)) { + continue; + } + + std::shared_ptr transaction; + // Deserialization is expensive, do it only for the transactions we are about to process + try { + transaction = std::make_shared(packet_data.rlp_[1][tx_idx].data().toBytes()); + received_transactions.emplace_back(trx_hash); + } catch (const Transaction::InvalidTransaction &e) { + throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); + } + + const auto [verified, reason] = trx_mgr_->verifyTransaction(transaction); + if (!verified) { + std::ostringstream err_msg; + err_msg << "DagBlock transaction " << transaction->getHash() << " validation failed: " << reason; + throw MaliciousPeerException(err_msg.str()); + } + + received_trx_count_++; + const auto tx_hash = transaction->getHash(); + const auto status = trx_mgr_->insertValidatedTransaction(std::move(transaction)); + if (status == TransactionStatus::Inserted) { + unique_received_trx_count_++; + } + if (status == TransactionStatus::Overflow) { + // Raise exception in trx pool is over the limit and this peer already has too many suspicious packets + if (peer->reportSuspiciousPacket() && trx_mgr_->nonProposableTransactionsOverTheLimit()) { + std::ostringstream err_msg; + err_msg << "Suspicious packets over the limit on DagBlock transaction " << tx_hash << " 
validation: " << reason; + } + } + } + + if (transaction_count > 0) { + LOG(log_tr_) << "Received TransactionPacket with " << packet_data.rlp_.itemCount() << " transactions"; + LOG(log_dg_) << "Received TransactionPacket with " << received_transactions.size() + << " unseen transactions:" << received_transactions << " from: " << peer->getId().abridged(); + } +} + +std::pair>> +TransactionPacketHandler::transactionsToSendToPeer(std::shared_ptr peer, + const std::vector &transactions, + uint32_t account_start_index) { + const auto accounts_size = transactions.size(); + bool trx_max_reached = false; + auto account_iterator = account_start_index; + std::pair> result; + // Next peer should continue after the last account of the current peer + uint32_t next_peer_account_index = (account_start_index + 1) % accounts_size; + + while (true) { + // Iterate over transactions from single account + for (auto const &trx : transactions[account_iterator]) { + auto trx_hash = trx->getHash(); + if (peer->isTransactionKnown(trx_hash)) { + continue; + } + // If max number of transactions to be sent is already reached include hashes to be sent + if (trx_max_reached) { + result.second.push_back(trx_hash); + if (result.second.size() == kMaxHashesInPacket) { + // If both transactions and hashes reached max nothing to do for this peer, return + return {next_peer_account_index, std::move(result)}; + } + } else { + result.first.push_back(trx); + if (result.first.size() == kMaxTransactionsInPacket) { + // Max number of transactions reached, save next_peer_account_index for next peer to continue to avoid + // sending same transactions to multiple peers + trx_max_reached = true; + next_peer_account_index = (account_iterator + 1) % accounts_size; + } + } + } + + account_iterator = (account_iterator + 1) % accounts_size; + if (account_iterator == account_start_index) { + // Iterated through all of the transactions, return + return {next_peer_account_index, std::move(result)}; + } + } +} + 
+std::vector, std::pair>>> +TransactionPacketHandler::transactionsToSendToPeers(std::vector &&transactions) { + // Main goal of the algorithm below is to send different transactions and hashes to different peers but still follow + // nonce ordering for single account and not send higher nonces without sending low nonces first + const auto accounts_size = transactions.size(); + if (!accounts_size) { + return {}; + } + std::vector, std::pair>>> + peers_with_transactions_to_send; + auto peers = peers_state_->getAllPeers(); + + // account_index keeps current account index so that different peers will receive + // transactions from different accounts + uint32_t account_index = 0; + for (const auto &peer : peers) { + if (peer.second->syncing_) { + continue; + } + + std::pair> peer_transactions; + std::tie(account_index, peer_transactions) = transactionsToSendToPeer(peer.second, transactions, account_index); + + if (peer_transactions.first.size() > 0) { + peers_with_transactions_to_send.push_back({peer.second, std::move(peer_transactions)}); + } + } + + return peers_with_transactions_to_send; +} + +void TransactionPacketHandler::periodicSendTransactions(std::vector &&transactions) { + auto peers_with_transactions_to_send = transactionsToSendToPeers(std::move(transactions)); + const auto peers_to_send_count = peers_with_transactions_to_send.size(); + if (peers_to_send_count > 0) { + // Sending it in same order favours some peers over others, always start with a different position + uint32_t start_with = rand() % peers_to_send_count; + for (uint32_t i = 0; i < peers_to_send_count; i++) { + auto peer_to_send = peers_with_transactions_to_send[(start_with + i) % peers_to_send_count]; + sendTransactions(peer_to_send.first, std::move(peer_to_send.second)); + } + } +} + +void TransactionPacketHandler::sendTransactions(std::shared_ptr peer, + std::pair> &&transactions) { + if (!peer) return; + const auto peer_id = peer->getId(); + const auto transactions_size = 
transactions.first.size(); + const auto hashes_size = transactions.second.size(); + + LOG(log_tr_) << "sendTransactions " << transactions.first.size() << " to " << peer_id; + + dev::RLPStream s(kTransactionPacketItemCount); + s.appendList(transactions_size + hashes_size); + for (const auto &trx : transactions.first) { + s << trx->getHash(); + } + + for (const auto &trx_hash : transactions.second) { + s << trx_hash; + } + + s.appendList(transactions_size); + + for (const auto &trx : transactions.first) { + s.appendRaw(trx->rlp()); + } + + if (sealAndSend(peer_id, SubprotocolPacketType::kTransactionPacket, std::move(s))) { + for (const auto &trx : transactions.first) { + peer->markTransactionAsKnown(trx->getHash()); + } + } +} + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/vote_packet_handler.cpp new file mode 100644 index 0000000000..184ae41e82 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/vote_packet_handler.cpp @@ -0,0 +1,141 @@ +#include "network/tarcap/packets_handlers/v3/vote_packet_handler.hpp" + +#include "pbft/pbft_manager.hpp" +#include "vote_manager/vote_manager.hpp" + +namespace taraxa::network::tarcap::v3 { + +VotePacketHandler::VotePacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_mgr, std::shared_ptr pbft_chain, + std::shared_ptr vote_mgr, + std::shared_ptr slashing_manager, const addr_t &node_addr, + const std::string &logs_prefix) + : ExtVotesPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_mgr), + std::move(pbft_chain), std::move(vote_mgr), std::move(slashing_manager), node_addr, + logs_prefix + "PBFT_VOTE_PH") {} + +void VotePacketHandler::validatePacketRlpFormat([[maybe_unused]] const threadpool::PacketData &packet_data) const { + auto items = 
packet_data.rlp_.itemCount(); + // Vote packet can contain either just a vote or vote + block + peer_chain_size + if (items != kVotePacketSize && items != kExtendedVotePacketSize) { + throw InvalidRlpItemsCountException(packet_data.type_str_, items, kExtendedVotePacketSize); + } +} + +void VotePacketHandler::process(const threadpool::PacketData &packet_data, const std::shared_ptr &peer) { + const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); + + // Optional packet items + std::shared_ptr pbft_block{nullptr}; + std::optional peer_chain_size{}; + + std::shared_ptr vote = std::make_shared(packet_data.rlp_[0]); + if (const size_t item_count = packet_data.rlp_.itemCount(); item_count == kExtendedVotePacketSize) { + try { + pbft_block = std::make_shared(packet_data.rlp_[1]); + } catch (const std::exception &e) { + throw MaliciousPeerException(e.what()); + } + peer_chain_size = packet_data.rlp_[2].toInt(); + LOG(log_dg_) << "Received PBFT vote " << vote->getHash() << " with PBFT block " << pbft_block->getBlockHash(); + } else { + LOG(log_dg_) << "Received PBFT vote " << vote->getHash(); + } + + // Update peer's max chain size + if (peer_chain_size.has_value() && *peer_chain_size > peer->pbft_chain_size_) { + peer->pbft_chain_size_ = *peer_chain_size; + } + + const auto vote_hash = vote->getHash(); + + if (!isPbftRelevantVote(vote)) { + LOG(log_dg_) << "Drop irrelevant vote " << vote_hash << " for current pbft state. Vote (period, round, step) = (" + << vote->getPeriod() << ", " << vote->getRound() << ", " << vote->getStep() + << "). 
Current PBFT (period, round, step) = (" << current_pbft_period << ", " << current_pbft_round + << ", " << pbft_mgr_->getPbftStep() << ")"; + return; + } + + // Do not process vote that has already been validated + if (vote_mgr_->voteAlreadyValidated(vote_hash)) { + LOG(log_dg_) << "Received vote " << vote_hash << " has already been validated"; + return; + } + + if (pbft_block) { + if (pbft_block->getBlockHash() != vote->getBlockHash()) { + std::ostringstream err_msg; + err_msg << "Vote " << vote->getHash().abridged() << " voted block " << vote->getBlockHash().abridged() + << " != actual block " << pbft_block->getBlockHash().abridged(); + throw MaliciousPeerException(err_msg.str()); + } + + peer->markPbftBlockAsKnown(pbft_block->getBlockHash()); + } + + if (!processVote(vote, pbft_block, peer, true)) { + return; + } + + // Do not mark it before, as peers have small caches of known votes. Only mark gossiping votes + peer->markPbftVoteAsKnown(vote_hash); + + pbft_mgr_->gossipVote(vote, pbft_block); +} + +void VotePacketHandler::onNewPbftVote(const std::shared_ptr &vote, const std::shared_ptr &block, + bool rebroadcast) { + for (const auto &peer : peers_state_->getAllPeers()) { + if (peer.second->syncing_) { + LOG(log_dg_) << " PBFT vote " << vote->getHash() << " not sent to " << peer.first << " peer syncing"; + continue; + } + + if (!rebroadcast && peer.second->isPbftVoteKnown(vote->getHash())) { + continue; + } + + // Send also block in case it is not known for the pear or rebroadcast == true + if (rebroadcast || !peer.second->isPbftBlockKnown(vote->getBlockHash())) { + sendPbftVote(peer.second, vote, block); + } else { + sendPbftVote(peer.second, vote, nullptr); + } + } +} + +void VotePacketHandler::sendPbftVote(const std::shared_ptr &peer, const std::shared_ptr &vote, + const std::shared_ptr &block) { + if (block && block->getBlockHash() != vote->getBlockHash()) { + LOG(log_er_) << "Vote " << vote->getHash().abridged() << " voted block " << 
vote->getBlockHash().abridged() + << " != actual block " << block->getBlockHash().abridged(); + return; + } + + dev::RLPStream s; + + if (block) { + s = dev::RLPStream(kExtendedVotePacketSize); + s.appendRaw(vote->rlp(true, false)); + s.appendRaw(block->rlp(true)); + s.append(pbft_chain_->getPbftChainSize()); + } else { + s = dev::RLPStream(kVotePacketSize); + s.appendRaw(vote->rlp(true, false)); + } + + if (sealAndSend(peer->getId(), SubprotocolPacketType::kVotePacket, std::move(s))) { + peer->markPbftVoteAsKnown(vote->getHash()); + if (block) { + peer->markPbftBlockAsKnown(block->getBlockHash()); + LOG(log_dg_) << " PBFT vote " << vote->getHash() << " together with block " << block->getBlockHash() + << " sent to " << peer->getId(); + } else { + LOG(log_dg_) << " PBFT vote " << vote->getHash() << " sent to " << peer->getId(); + } + } +} + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/votes_bundle_packet_handler.cpp new file mode 100644 index 0000000000..1b89bae0cd --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/votes_bundle_packet_handler.cpp @@ -0,0 +1,125 @@ +#include "network/tarcap/packets_handlers/v3/votes_bundle_packet_handler.hpp" + +#include "pbft/pbft_manager.hpp" +#include "vote/votes_bundle_rlp.hpp" +#include "vote_manager/vote_manager.hpp" + +namespace taraxa::network::tarcap::v3 { + +VotesBundlePacketHandler::VotesBundlePacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_mgr, + std::shared_ptr pbft_chain, + std::shared_ptr vote_mgr, + std::shared_ptr slashing_manager, + const addr_t &node_addr, const std::string &logs_prefix) + : ExtVotesPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_mgr), + std::move(pbft_chain), std::move(vote_mgr), 
std::move(slashing_manager), node_addr, + logs_prefix + "VOTES_BUNDLE_PH") {} + +void VotesBundlePacketHandler::validatePacketRlpFormat( + [[maybe_unused]] const threadpool::PacketData &packet_data) const { + auto items = packet_data.rlp_.itemCount(); + if (items != kPbftVotesBundleRlpSize) { + throw InvalidRlpItemsCountException(packet_data.type_str_, items, kPbftVotesBundleRlpSize); + } + + auto votes_count = packet_data.rlp_[kPbftVotesBundleRlpSize - 1].itemCount(); + if (votes_count == 0 || votes_count > kMaxVotesInBundleRlp) { + throw InvalidRlpItemsCountException(packet_data.type_str_, items, kMaxVotesInBundleRlp); + } +} + +void VotesBundlePacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { + const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); + + const auto votes_bundle_block_hash = packet_data.rlp_[0].toHash(); + const auto votes_bundle_pbft_period = packet_data.rlp_[1].toInt(); + const auto votes_bundle_pbft_round = packet_data.rlp_[2].toInt(); + const auto votes_bundle_votes_step = packet_data.rlp_[3].toInt(); + + const auto &reference_vote = + std::make_shared(votes_bundle_block_hash, votes_bundle_pbft_period, votes_bundle_pbft_round, + votes_bundle_votes_step, packet_data.rlp_[4][0]); + const auto votes_bundle_votes_type = reference_vote->getType(); + + // Votes sync bundles are allowed to cotain only votes bundles of the same type, period, round and step so if first + // vote is irrelevant, all of them are + if (!isPbftRelevantVote(reference_vote)) { + LOG(log_wr_) << "Drop votes sync bundle as it is irrelevant for current pbft state. Votes (period, round, step) = (" + << votes_bundle_pbft_period << ", " << votes_bundle_pbft_round << ", " << reference_vote->getStep() + << "). 
Current PBFT (period, round, step) = (" << current_pbft_period << ", " << current_pbft_round + << ", " << pbft_mgr_->getPbftStep() << ")"; + return; + } + + // VotesBundlePacket does not support propose votes + if (reference_vote->getType() == PbftVoteTypes::propose_vote) { + LOG(log_er_) << "Dropping votes bundle packet due to received \"propose\" votes from " << packet_data.from_node_id_ + << ". The peer may be a malicious player, will be disconnected"; + disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + return; + } + + // Process processStandardVote is called with false in case of next votes bundle -> does not check max boundaries + // for round and step to actually being able to sync the current round in case network is stalled + bool check_max_round_step = true; + if (votes_bundle_votes_type == PbftVoteTypes::cert_vote || votes_bundle_votes_type == PbftVoteTypes::next_vote) { + check_max_round_step = false; + } + + std::vector> votes; + for (const auto vote_rlp : packet_data.rlp_[4]) { + auto vote = std::make_shared(votes_bundle_block_hash, votes_bundle_pbft_period, votes_bundle_pbft_round, + votes_bundle_votes_step, vote_rlp); + peer->markPbftVoteAsKnown(vote->getHash()); + + // Do not process vote that has already been validated + if (vote_mgr_->voteAlreadyValidated(vote->getHash())) { + LOG(log_dg_) << "Received vote " << vote->getHash() << " has already been validated"; + continue; + } + + LOG(log_dg_) << "Received sync vote " << vote->getHash().abridged(); + + if (!processVote(vote, nullptr, peer, check_max_round_step)) { + continue; + } + + votes.push_back(std::move(vote)); + } + + LOG(log_nf_) << "Received " << packet_data.rlp_[4].itemCount() << " (processed " << votes.size() + << " ) sync votes from peer " << packet_data.from_node_id_ << " node current round " + << current_pbft_round << ", peer pbft round " << votes_bundle_pbft_round; + + onNewPbftVotesBundle(votes, false, packet_data.from_node_id_); +} + +void 
VotesBundlePacketHandler::onNewPbftVotesBundle(const std::vector> &votes, + bool rebroadcast, + const std::optional &exclude_node) { + for (const auto &peer : peers_state_->getAllPeers()) { + if (peer.second->syncing_) { + continue; + } + + if (exclude_node.has_value() && *exclude_node == peer.first) { + continue; + } + + std::vector> peer_votes; + for (const auto &vote : votes) { + if (!rebroadcast && peer.second->isPbftVoteKnown(vote->getHash())) { + continue; + } + + peer_votes.push_back(vote); + } + + sendPbftVotesBundle(peer.second, std::move(peer_votes)); + } +} + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/shared_states/peers_state.cpp b/libraries/core_libs/network/src/tarcap/shared_states/peers_state.cpp index 22f3d7c08d..b109b15d12 100644 --- a/libraries/core_libs/network/src/tarcap/shared_states/peers_state.cpp +++ b/libraries/core_libs/network/src/tarcap/shared_states/peers_state.cpp @@ -40,7 +40,7 @@ std::pair, std::string> PeersState::getPacketSenderP // If peer is in pending_peers_, it means he has not yet sent us initial status packet and // we can receive/send only StatusPacket from/to him if (const auto it_peer = pending_peers_.find(node_id); it_peer != pending_peers_.end()) { - if (packet_type == SubprotocolPacketType::StatusPacket) { + if (packet_type == SubprotocolPacketType::kStatusPacket) { return {it_peer->second, ""}; } else { std::ostringstream error; diff --git a/libraries/core_libs/network/src/tarcap/stats/node_stats.cpp b/libraries/core_libs/network/src/tarcap/stats/node_stats.cpp index f9e1a175b3..d1854de97a 100644 --- a/libraries/core_libs/network/src/tarcap/stats/node_stats.cpp +++ b/libraries/core_libs/network/src/tarcap/stats/node_stats.cpp @@ -11,6 +11,7 @@ #include "pbft/pbft_manager.hpp" #include "transaction/transaction_manager.hpp" #include "vote_manager/vote_manager.hpp" + namespace taraxa::network::tarcap { NodeStats::NodeStats(std::shared_ptr pbft_syncing_state, 
std::shared_ptr pbft_chain, diff --git a/libraries/core_libs/network/src/tarcap/stats/time_period_packets_stats.cpp b/libraries/core_libs/network/src/tarcap/stats/time_period_packets_stats.cpp index d59a767285..9301af99d9 100644 --- a/libraries/core_libs/network/src/tarcap/stats/time_period_packets_stats.cpp +++ b/libraries/core_libs/network/src/tarcap/stats/time_period_packets_stats.cpp @@ -1,8 +1,9 @@ #include "network/tarcap/stats/time_period_packets_stats.hpp" +#include + #include "common/util.hpp" -#include "json/writer.h" -#include "network/tarcap/shared_states/peers_state.hpp" +#include "network/tarcap/taraxa_peer.hpp" namespace taraxa::network::tarcap { diff --git a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp index 562feb5f38..2442f20e86 100644 --- a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp +++ b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp @@ -2,6 +2,7 @@ #include +#include "config/version.hpp" #include "network/tarcap/packets_handler.hpp" #include "network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/dag_sync_packet_handler.hpp" @@ -16,6 +17,19 @@ #include "network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/dag_block_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/dag_sync_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/get_dag_sync_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/get_next_votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/get_pillar_votes_bundle_packet_handler.hpp" +#include 
"network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/pillar_vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/pillar_votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/status_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/transaction_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/votes_bundle_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "node/node.hpp" #include "pbft/pbft_chain.hpp" @@ -23,7 +37,6 @@ #include "pillar_chain/pillar_chain_manager.hpp" #include "slashing_manager/slashing_manager.hpp" #include "transaction/transaction_manager.hpp" -#include "vote/pbft_vote.hpp" namespace taraxa::network::tarcap { @@ -61,7 +74,7 @@ std::string TaraxaCapability::name() const { return TARAXA_CAPABILITY_NAME; } TarcapVersion TaraxaCapability::version() const { return version_; } -unsigned TaraxaCapability::messageCount() const { return SubprotocolPacketType::PacketCount; } +unsigned TaraxaCapability::messageCount() const { return SubprotocolPacketType::kPacketCount; } void TaraxaCapability::onConnect(std::weak_ptr session, u256 const &) { const auto session_p = session.lock(); @@ -78,11 +91,24 @@ void TaraxaCapability::onConnect(std::weak_ptr session, u256 return; } + // If queue is over the limit do not allow new nodes to connect until queue size is reduced + if (queue_over_limit_ && peers_state_->getPeersCount() >= last_disconnect_number_of_peers_) { + session_p->disconnect(dev::p2p::UserReason); + LOG(log_wr_) << "Node " << node_id << " connection dropped - queue over limit"; + return; + } + peers_state_->addPendingPeer(node_id, session_p->info().host + ":" + std::to_string(session_p->info().port)); LOG(log_nf_) << "Node " << node_id << " connected"; - auto status_packet_handler = 
packets_handlers_->getSpecificHandler(); - status_packet_handler->sendStatus(node_id, true); + // TODO[2905]: refactor + if (version_ == TARAXA_NET_VERSION) { + auto status_packet_handler = packets_handlers_->getSpecificHandler(); + status_packet_handler->sendStatus(node_id, true); + } else { + auto status_packet_handler = packets_handlers_->getSpecificHandler(); + status_packet_handler->sendStatus(node_id, true); + } } void TaraxaCapability::onDisconnect(dev::p2p::NodeID const &_nodeID) { @@ -94,7 +120,13 @@ void TaraxaCapability::onDisconnect(dev::p2p::NodeID const &_nodeID) { pbft_syncing_state_->setPbftSyncing(false); if (peers_state_->getPeersCount() > 0) { LOG(log_dg_) << "Restart PBFT/DAG syncing due to syncing peer disconnect."; - packets_handlers_->getSpecificHandler()->startSyncingPbft(); + // TODO[2905]: refactor + if (version_ == TARAXA_NET_VERSION) { + packets_handlers_->getSpecificHandler()->startSyncingPbft(); + } else { + packets_handlers_->getSpecificHandler()->startSyncingPbft(); + } + } else { LOG(log_dg_) << "Stop PBFT/DAG syncing due to syncing peer disconnect and no other peers available."; } @@ -167,28 +199,11 @@ void TaraxaCapability::interpretCapabilityPacket(std::weak_ptr kConf.network.ddos_protection.max_packets_queue_size) { - const auto connected_peers = peers_state_->getAllPeers(); - // Always keep at least 5 connected peers - if (connected_peers.size() > 5) { - // Find peer with the highest processing time and disconnect him - std::pair peer_max_processing_time{std::chrono::microseconds(0), - dev::p2p::NodeID()}; - - for (const auto &connected_peer : connected_peers) { - const auto peer_packets_stats = connected_peer.second->getAllPacketsStatsCopy(); - - if (peer_packets_stats.second.processing_duration_ > peer_max_processing_time.first) { - peer_max_processing_time = {peer_packets_stats.second.processing_duration_, connected_peer.first}; - } - } - - // Disconnect peer with the highest processing time - LOG(log_er_) << "Max allowed 
packets queue size " << kConf.network.ddos_protection.max_packets_queue_size - << " exceeded: " << tp_queue_size << ". Peer with the highest processing time " - << peer_max_processing_time.second << " will be disconnected"; - host->disconnect(node_id, dev::p2p::UserReason); - return; - } + // Queue size is over the limit + handlePacketQueueOverLimit(host, node_id, tp_queue_size); + } else { + queue_over_limit_ = false; + last_disconnect_number_of_peers_ = 0; } // TODO: we are making a copy here for each packet bytes(toBytes()), which is pretty significant. Check why RLP does @@ -196,11 +211,50 @@ void TaraxaCapability::interpretCapabilityPacket(std::weak_ptrpush({version(), threadpool::PacketData(packet_type, node_id, _r.data().toBytes())}); } +void TaraxaCapability::handlePacketQueueOverLimit(std::shared_ptr host, dev::p2p::NodeID node_id, + size_t tp_queue_size) { + if (!queue_over_limit_) { + queue_over_limit_start_time_ = std::chrono::system_clock::now(); + queue_over_limit_ = true; + } + + // Check if Queue is over the limit for queue_limit_time + if ((std::chrono::system_clock::now() - queue_over_limit_start_time_) > + kConf.network.ddos_protection.queue_limit_time) { + // Only disconnect if there is more than peer_disconnect_interval since last disconnect + if ((std::chrono::system_clock::now() - last_ddos_disconnect_time_) > + kConf.network.ddos_protection.peer_disconnect_interval) { + auto connected_peers = peers_state_->getAllPeers(); + last_disconnect_number_of_peers_ = connected_peers.size(); + last_ddos_disconnect_time_ = std::chrono::system_clock::now(); + // Always keep at least 5 connected peers + if (connected_peers.size() > 5) { + // Find peers with the highest processing time and disconnect + std::pair peer_max_processing_time{std::chrono::microseconds(0), + dev::p2p::NodeID()}; + for (const auto &connected_peer : connected_peers) { + const auto peer_packets_stats = connected_peer.second->getAllPacketsStatsCopy(); + if 
(peer_packets_stats.second.processing_duration_ > peer_max_processing_time.first) { + peer_max_processing_time = {peer_packets_stats.second.processing_duration_, connected_peer.first}; + } + } + + // Disconnect peer with the highest processing time + LOG(log_er_) << "Max allowed packets queue size " << kConf.network.ddos_protection.max_packets_queue_size + << " exceeded: " << tp_queue_size << ". Peer with the highest processing time " + << peer_max_processing_time.second << " will be disconnected"; + host->disconnect(node_id, dev::p2p::UserReason); + connected_peers.erase(node_id); + } + } + } +} + inline bool TaraxaCapability::filterSyncIrrelevantPackets(SubprotocolPacketType packet_type) const { switch (packet_type) { - case StatusPacket: - case GetPbftSyncPacket: - case PbftSyncPacket: + case SubprotocolPacketType::kStatusPacket: + case SubprotocolPacketType::kGetPbftSyncPacket: + case SubprotocolPacketType::kPbftSyncPacket: return false; default: return true; @@ -216,7 +270,7 @@ const TaraxaCapability::InitPacketsHandlers TaraxaCapability::kInitLatestVersion const std::shared_ptr &pbft_mgr, const std::shared_ptr &pbft_chain, const std::shared_ptr &vote_mgr, const std::shared_ptr &dag_mgr, const std::shared_ptr &trx_mgr, const std::shared_ptr &slashing_manager, - const std::shared_ptr &pillar_chain_mgr, TarcapVersion version, + const std::shared_ptr &pillar_chain_mgr, TarcapVersion, const addr_t &node_addr) { auto packets_handlers = std::make_shared(); // Consensus packets with high processing priority @@ -229,13 +283,11 @@ const TaraxaCapability::InitPacketsHandlers TaraxaCapability::kInitLatestVersion // Standard packets with mid processing priority packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, - pbft_chain, pbft_mgr, dag_mgr, trx_mgr, db, - version > kV2NetworkVersion, node_addr, logs_prefix); + pbft_chain, pbft_mgr, dag_mgr, trx_mgr, db, node_addr, + logs_prefix); - // Support for transaition from V2 to V3, once 
all nodes update to V3 post next hardfork, V2 support can be - // removed packets_handlers->registerHandler(config, peers_state, packets_stats, trx_mgr, - node_addr, version > kV2NetworkVersion, logs_prefix); + node_addr, logs_prefix); // Non critical packets with low processing priority packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, @@ -264,4 +316,58 @@ const TaraxaCapability::InitPacketsHandlers TaraxaCapability::kInitLatestVersion return packets_handlers; }; +const TaraxaCapability::InitPacketsHandlers TaraxaCapability::kInitV4Handlers = + [](const std::string &logs_prefix, const FullNodeConfig &config, const h256 &genesis_hash, + const std::shared_ptr &peers_state, const std::shared_ptr &pbft_syncing_state, + const std::shared_ptr &packets_stats, const std::shared_ptr &db, + const std::shared_ptr &pbft_mgr, const std::shared_ptr &pbft_chain, + const std::shared_ptr &vote_mgr, const std::shared_ptr &dag_mgr, + const std::shared_ptr &trx_mgr, const std::shared_ptr &slashing_manager, + const std::shared_ptr &pillar_chain_mgr, TarcapVersion, + const addr_t &node_addr) { + auto packets_handlers = std::make_shared(); + // Consensus packets with high processing priority + packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_mgr, pbft_chain, + vote_mgr, slashing_manager, node_addr, logs_prefix); + packets_handlers->registerHandler( + + config, peers_state, packets_stats, pbft_mgr, pbft_chain, vote_mgr, slashing_manager, node_addr, logs_prefix); + packets_handlers->registerHandler( + config, peers_state, packets_stats, pbft_mgr, pbft_chain, vote_mgr, slashing_manager, node_addr, logs_prefix); + + // Standard packets with mid processing priority + packets_handlers->registerHandler(config, peers_state, packets_stats, + pbft_syncing_state, pbft_chain, pbft_mgr, dag_mgr, + trx_mgr, db, node_addr, logs_prefix); + + packets_handlers->registerHandler(config, peers_state, packets_stats, trx_mgr, + node_addr, 
logs_prefix); + + // Non critical packets with low processing priority + packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, + pbft_chain, pbft_mgr, dag_mgr, db, genesis_hash, + node_addr, logs_prefix); + packets_handlers->registerHandler(config, peers_state, packets_stats, trx_mgr, + dag_mgr, db, node_addr, logs_prefix); + + packets_handlers->registerHandler(config, peers_state, packets_stats, + pbft_syncing_state, pbft_chain, pbft_mgr, dag_mgr, + trx_mgr, db, node_addr, logs_prefix); + + packets_handlers->registerHandler( + config, peers_state, packets_stats, pbft_syncing_state, pbft_chain, vote_mgr, db, node_addr, logs_prefix); + + packets_handlers->registerHandler(config, peers_state, packets_stats, + pbft_syncing_state, pbft_chain, pbft_mgr, dag_mgr, + vote_mgr, db, node_addr, logs_prefix); + packets_handlers->registerHandler(config, peers_state, packets_stats, + pillar_chain_mgr, node_addr, logs_prefix); + packets_handlers->registerHandler( + config, peers_state, packets_stats, pillar_chain_mgr, node_addr, logs_prefix); + packets_handlers->registerHandler(config, peers_state, packets_stats, + pillar_chain_mgr, node_addr, logs_prefix); + + return packets_handlers; + }; + } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/threadpool/packet_data.cpp b/libraries/core_libs/network/src/threadpool/packet_data.cpp index 5f8c187c46..84fb8ec5de 100644 --- a/libraries/core_libs/network/src/threadpool/packet_data.cpp +++ b/libraries/core_libs/network/src/threadpool/packet_data.cpp @@ -17,13 +17,13 @@ PacketData::PacketData(SubprotocolPacketType type, const dev::p2p::NodeID& from_ * @return PacketPriority based om packet_type */ PacketData::PacketPriority PacketData::getPacketPriority(SubprotocolPacketType packet_type) { - if (packet_type > SubprotocolPacketType::HighPriorityPackets && - packet_type < SubprotocolPacketType::MidPriorityPackets) { + if (packet_type > 
SubprotocolPacketType::kHighPriorityPackets && + packet_type < SubprotocolPacketType::kMidPriorityPackets) { return PacketPriority::High; - } else if (packet_type > SubprotocolPacketType::MidPriorityPackets && - packet_type < SubprotocolPacketType::LowPriorityPackets) { + } else if (packet_type > SubprotocolPacketType::kMidPriorityPackets && + packet_type < SubprotocolPacketType::kLowPriorityPackets) { return PacketPriority::Mid; - } else if (packet_type > SubprotocolPacketType::LowPriorityPackets) { + } else if (packet_type > SubprotocolPacketType::kLowPriorityPackets) { return PacketPriority::Low; } diff --git a/libraries/core_libs/network/src/threadpool/packets_blocking_mask.cpp b/libraries/core_libs/network/src/threadpool/packets_blocking_mask.cpp index 2eec31f637..9341d3885c 100644 --- a/libraries/core_libs/network/src/threadpool/packets_blocking_mask.cpp +++ b/libraries/core_libs/network/src/threadpool/packets_blocking_mask.cpp @@ -207,7 +207,7 @@ bool PacketsBlockingMask::isPacketBlocked(const PacketData& packet_data) const { // Custom blocks for specific packet types... 
// Check if DagBlockPacket is blocked by processing some dag blocks with <= dag level - if (packet_data.type_ == SubprotocolPacketType::DagBlockPacket && + if (packet_data.type_ == SubprotocolPacketType::kDagBlockPacket && (isDagBlockPacketBlockedByLevel(packet_data) || isDagBlockPacketBlockedBySameDagBlock(packet_data))) { return true; } diff --git a/libraries/core_libs/network/src/threadpool/priority_queue.cpp b/libraries/core_libs/network/src/threadpool/priority_queue.cpp index 8311626ddb..14b7754004 100644 --- a/libraries/core_libs/network/src/threadpool/priority_queue.cpp +++ b/libraries/core_libs/network/src/threadpool/priority_queue.cpp @@ -125,11 +125,40 @@ void PriorityQueue::updateDependenciesStart(const PacketData& packet) { act_total_workers_count_++; packets_queues_[packet.priority_].incrementActWorkersCount(); + updateBlockingDependencies(packet); +} + +void PriorityQueue::updateDependenciesFinish(const PacketData& packet, std::mutex& queue_mutex, + std::condition_variable& cond_var) { + assert(act_total_workers_count_ > 0); + + if (!isNonBlockingPacket(packet.type_)) { + // Note: every blocking packet must lock queue_mutex !!! + std::unique_lock lock(queue_mutex); + updateBlockingDependencies(packet, true); + cond_var.notify_all(); + } + + act_total_workers_count_--; + packets_queues_[packet.priority_].decrementActWorkersCount(); +} - // Process all dependencies here - it is called when packet processing has started - // !!! 
Important - there is a "mirror" function updateDependenciesFinish and all dependencies that are set - // here should be unset in updateDependenciesFinish +bool PriorityQueue::isNonBlockingPacket(SubprotocolPacketType packet_type) const { + // Note: any packet type that is not in this switch should be processed in updateDependencies + switch (packet_type) { + case SubprotocolPacketType::kVotePacket: + case SubprotocolPacketType::kGetNextVotesSyncPacket: + case SubprotocolPacketType::kVotesBundlePacket: + case SubprotocolPacketType::kStatusPacket: + case SubprotocolPacketType::kPillarVotePacket: + return true; + } + return false; +} + +bool PriorityQueue::updateBlockingDependencies(const PacketData& packet, bool unblock_processing) { + // Note: any packet type that is not in this switch should be processed in isNonBlockingPacket switch (packet.type_) { // Packets that can be processed only 1 at the time // GetDagSyncPacket -> serve dag syncing data to only 1 node at the time @@ -137,87 +166,61 @@ void PriorityQueue::updateDependenciesStart(const PacketData& packet) { // GetPillarVotesBundlePacket -> serve pillar votes syncing data to only 1 node at the time // PillarVotesBundlePacket -> process only 1 packet at a time. 
TODO[2744]: remove after protection mechanism is // implemented PbftSyncPacket -> process sync pbft blocks synchronously - case SubprotocolPacketType::GetDagSyncPacket: - case SubprotocolPacketType::GetPbftSyncPacket: - case SubprotocolPacketType::GetPillarVotesBundlePacket: - case SubprotocolPacketType::PillarVotesBundlePacket: // TODO[2744]: remove - case SubprotocolPacketType::PbftSyncPacket: - blocked_packets_mask_.markPacketAsHardBlocked(packet, packet.type_); + case SubprotocolPacketType::kGetDagSyncPacket: + case SubprotocolPacketType::kGetPbftSyncPacket: + case SubprotocolPacketType::kGetPillarVotesBundlePacket: + case SubprotocolPacketType::kPillarVotesBundlePacket: // TODO[2744]: remove + case SubprotocolPacketType::kPbftSyncPacket: { + if (!unblock_processing) { + blocked_packets_mask_.markPacketAsHardBlocked(packet, packet.type_); + } else { + blocked_packets_mask_.markPacketAsHardUnblocked(packet, packet.type_); + } break; + } // When syncing dag blocks, process only 1 packet at a time: // DagSyncPacket -> process sync dag blocks synchronously // DagBlockPacket -> wait with processing of new dag blocks until old blocks are synced - case SubprotocolPacketType::DagSyncPacket: - blocked_packets_mask_.markPacketAsHardBlocked(packet, packet.type_); - blocked_packets_mask_.markPacketAsPeerOrderBlocked(packet, SubprotocolPacketType::DagBlockPacket); + case SubprotocolPacketType::kDagSyncPacket: { + if (!unblock_processing) { + blocked_packets_mask_.markPacketAsHardBlocked(packet, packet.type_); + blocked_packets_mask_.markPacketAsPeerOrderBlocked(packet, SubprotocolPacketType::kDagBlockPacket); + } else { + blocked_packets_mask_.markPacketAsHardUnblocked(packet, packet.type_); + blocked_packets_mask_.markPacketAsPeerOrderUnblocked(packet, SubprotocolPacketType::kDagBlockPacket); + } break; + } // When processing TransactionPacket, processing of all dag block packets that were received after that (from the // same peer). 
No need to block processing of dag blocks packets received before as it should not be possible to // send dag block before sending txs it contains... - case SubprotocolPacketType::TransactionPacket: - blocked_packets_mask_.markPacketAsPeerOrderBlocked(packet, SubprotocolPacketType::DagBlockPacket); - break; - - case SubprotocolPacketType::DagBlockPacket: - blocked_packets_mask_.setDagBlockLevelBeingProcessed(packet); - blocked_packets_mask_.setDagBlockBeingProcessed(packet); - break; - - default: - break; - } -} - -void PriorityQueue::updateDependenciesFinish(const PacketData& packet, std::mutex& queue_mutex, - std::condition_variable& cond_var) { - assert(act_total_workers_count_ > 0); - - // Process all dependencies here - it is called when packet processing is finished - - // Note: every case in this switch must lock queue_mutex !!! - switch (packet.type_) { - case SubprotocolPacketType::GetDagSyncPacket: - case SubprotocolPacketType::GetPbftSyncPacket: - case SubprotocolPacketType::GetPillarVotesBundlePacket: - case SubprotocolPacketType::PillarVotesBundlePacket: // TODO[2744]: remove - case SubprotocolPacketType::PbftSyncPacket: { - std::unique_lock lock(queue_mutex); - blocked_packets_mask_.markPacketAsHardUnblocked(packet, packet.type_); - cond_var.notify_all(); - break; - } - - case SubprotocolPacketType::DagSyncPacket: { - std::unique_lock lock(queue_mutex); - blocked_packets_mask_.markPacketAsHardUnblocked(packet, packet.type_); - blocked_packets_mask_.markPacketAsPeerOrderUnblocked(packet, SubprotocolPacketType::DagBlockPacket); - cond_var.notify_all(); - break; - } - - case SubprotocolPacketType::TransactionPacket: { - std::unique_lock lock(queue_mutex); - blocked_packets_mask_.markPacketAsPeerOrderUnblocked(packet, SubprotocolPacketType::DagBlockPacket); - cond_var.notify_all(); + case SubprotocolPacketType::kTransactionPacket: { + if (!unblock_processing) { + blocked_packets_mask_.markPacketAsPeerOrderBlocked(packet, 
SubprotocolPacketType::kDagBlockPacket); + } else { + blocked_packets_mask_.markPacketAsPeerOrderUnblocked(packet, SubprotocolPacketType::kDagBlockPacket); + } break; } - case SubprotocolPacketType::DagBlockPacket: { - std::unique_lock lock(queue_mutex); - blocked_packets_mask_.unsetDagBlockLevelBeingProcessed(packet); - blocked_packets_mask_.unsetDagBlockBeingProcessed(packet); - cond_var.notify_all(); + case SubprotocolPacketType::kDagBlockPacket: { + if (!unblock_processing) { + blocked_packets_mask_.setDagBlockLevelBeingProcessed(packet); + blocked_packets_mask_.setDagBlockBeingProcessed(packet); + } else { + blocked_packets_mask_.unsetDagBlockLevelBeingProcessed(packet); + blocked_packets_mask_.unsetDagBlockBeingProcessed(packet); + } break; } default: - break; + return false; } - act_total_workers_count_--; - packets_queues_[packet.priority_].decrementActWorkersCount(); + return true; } size_t PriorityQueue::getPrirotityQueueSize(PacketData::PacketPriority priority) const { diff --git a/libraries/core_libs/network/src/ws_server.cpp b/libraries/core_libs/network/src/ws_server.cpp index d8f74f4470..d25a1175d6 100644 --- a/libraries/core_libs/network/src/ws_server.cpp +++ b/libraries/core_libs/network/src/ws_server.cpp @@ -7,9 +7,7 @@ #include #include "common/jsoncpp.hpp" -#include "common/util.hpp" -#include "config/config.hpp" -#include "network/rpc/eth/Eth.h" +#include "network/rpc/eth/data.hpp" namespace taraxa::net { @@ -51,14 +49,21 @@ void WsSession::on_read(beast::error_code ec, std::size_t bytes_transferred) { return close(is_normal(ec)); } - LOG(log_tr_) << "WS READ " << (static_cast(read_buffer_.data().data())); + auto ws_server = ws_server_.lock(); + if (ws_server && ws_server->pendingTasksOverLimit()) { + LOG(log_er_) << "WS closed - pending tasks over the limit " << ws_server->numberOfPendingTasks(); + return close(true); + } + LOG(log_tr_) << "WS READ " << (static_cast(read_buffer_.data().data())); processAsync(); // Do another read do_read(); } 
void WsSession::processAsync() { + if (closed_) return; + std::string request(static_cast(read_buffer_.data().data()), read_buffer_.size()); read_buffer_.consume(read_buffer_.size()); LOG(log_tr_) << "processAsync " << request; @@ -75,6 +80,8 @@ void WsSession::processAsync() { } void WsSession::writeAsync(std::string &&message) { + if (closed_) return; + LOG(log_tr_) << "WS WRITE " << message.c_str(); auto executor = ws_.get_executor(); if (!executor) { @@ -113,12 +120,12 @@ void WsSession::newEthBlock(const ::taraxa::final_chain::BlockHeader &payload, c writeAsync(std::move(response)); } } -void WsSession::newDagBlock(DagBlock const &blk) { +void WsSession::newDagBlock(const std::shared_ptr &blk) { if (new_dag_blocks_subscription_) { Json::Value res, params; res["jsonrpc"] = "2.0"; res["method"] = "eth_subscription"; - params["result"] = blk.getJson(); + params["result"] = blk->getJson(); params["subscription"] = dev::toJS(new_dag_blocks_subscription_); res["params"] = params; auto response = util::to_string(res); @@ -200,8 +207,13 @@ bool WsSession::is_normal(const beast::error_code &ec) const { return false; } -WsServer::WsServer(boost::asio::io_context &ioc, tcp::endpoint endpoint, addr_t node_addr) - : ioc_(ioc), acceptor_(ioc), node_addr_(std::move(node_addr)) { +WsServer::WsServer(std::shared_ptr thread_pool, tcp::endpoint endpoint, addr_t node_addr, + uint32_t max_pending_tasks) + : ioc_(thread_pool->unsafe_get_io_context()), + acceptor_(thread_pool->unsafe_get_io_context()), + thread_pool_(thread_pool), + kMaxPendingTasks(max_pending_tasks), + node_addr_(std::move(node_addr)) { LOG_OBJECTS_CREATE("WS_SERVER"); beast::error_code ec; @@ -280,7 +292,7 @@ void WsServer::on_accept(beast::error_code ec, tcp::socket socket) { if (!stopped_) do_accept(); } -void WsServer::newDagBlock(DagBlock const &blk) { +void WsServer::newDagBlock(const std::shared_ptr &blk) { boost::shared_lock lock(sessions_mtx_); for (auto const &session : sessions) { if 
(!session->is_closed()) session->newDagBlock(blk); @@ -329,4 +341,12 @@ uint32_t WsServer::numberOfSessions() { return sessions.size(); } +uint32_t WsServer::numberOfPendingTasks() const { + auto thread_pool = thread_pool_.lock(); + if (thread_pool) { + return thread_pool->num_pending_tasks(); + } + return 0; +} + } // namespace taraxa::net \ No newline at end of file diff --git a/libraries/core_libs/node/include/node/node.hpp b/libraries/core_libs/node/include/node/node.hpp index a0b9aa87b5..2a658ecd2f 100644 --- a/libraries/core_libs/node/include/node/node.hpp +++ b/libraries/core_libs/node/include/node/node.hpp @@ -5,18 +5,10 @@ #include #include -#include #include -#include -#include -#include -#include #include "common/thread_pool.hpp" -#include "common/util.hpp" -#include "common/vrf_wrapper.hpp" #include "config/config.hpp" -#include "config/version.hpp" #include "network/http_server.hpp" #include "network/rpc/DebugFace.h" #include "network/rpc/EthFace.h" @@ -26,7 +18,6 @@ #include "network/ws_server.hpp" #include "pbft/pbft_chain.hpp" #include "storage/storage.hpp" -#include "transaction/transaction.hpp" #include "vote_manager/vote_manager.hpp" namespace taraxa { @@ -89,8 +80,8 @@ class FullNode : public std::enable_shared_from_this { using JsonRpcServer = ModularServer; // should be destroyed after all components, since they may depend on it through unsafe pointers - std::unique_ptr rpc_thread_pool_; - std::unique_ptr graphql_thread_pool_; + std::shared_ptr rpc_thread_pool_; + std::shared_ptr graphql_thread_pool_; // In cae we will you config for this TP, it needs to be unique_ptr !!! 
util::ThreadPool subscription_pool_; @@ -114,7 +105,7 @@ class FullNode : public std::enable_shared_from_this { std::shared_ptr pbft_chain_; std::shared_ptr pillar_chain_mgr_; std::shared_ptr key_manager_; - std::shared_ptr final_chain_; + std::shared_ptr final_chain_; std::shared_ptr jsonrpc_http_; std::shared_ptr graphql_http_; std::shared_ptr jsonrpc_ws_; diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index cb65d26a3a..bc86ad17d9 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -5,13 +5,12 @@ #include #include #include -#include #include #include "dag/dag.hpp" #include "dag/dag_block.hpp" #include "dag/dag_block_proposer.hpp" -#include "final_chain/final_chain_impl.hpp" +#include "final_chain/final_chain.hpp" #include "graphql/http_processor.hpp" #include "graphql/ws_server.hpp" #include "key_manager/key_manager.hpp" @@ -30,7 +29,6 @@ #include "pillar_chain/pillar_chain_manager.hpp" #include "slashing_manager/slashing_manager.hpp" #include "storage/migration/migration_manager.hpp" -#include "storage/migration/transaction_period.hpp" #include "transaction/gas_pricer.hpp" #include "transaction/transaction_manager.hpp" @@ -95,7 +93,6 @@ void FullNode::init() { if (conf_.db_config.fix_trx_period) { migration_manager.applyTransactionPeriod(); } - if (db_->getDagBlocksCount() == 0) { db_->setGenesisHash(conf_.genesis.genesisHash()); } @@ -113,7 +110,7 @@ void FullNode::init() { } gas_pricer_ = std::make_shared(conf_.genesis.gas_price, conf_.is_light_node, db_); - final_chain_ = std::make_shared(db_, conf_, node_addr); + final_chain_ = std::make_shared(db_, conf_, node_addr); key_manager_ = std::make_shared(final_chain_); trx_mgr_ = std::make_shared(conf_, db_, final_chain_, node_addr); @@ -130,20 +127,14 @@ void FullNode::init() { } pbft_chain_ = std::make_shared(node_addr, db_); - dag_mgr_ = std::make_shared( - conf_.genesis.dag_genesis_block, node_addr, 
conf_.genesis.sortition, conf_.genesis.dag, trx_mgr_, pbft_chain_, - final_chain_, db_, key_manager_, conf_.genesis.pbft.gas_limit, conf_.genesis.state, conf_.is_light_node, - conf_.light_node_history, conf_.max_levels_per_period, conf_.dag_expiry_limit); - auto slashing_manager = std::make_shared(final_chain_, trx_mgr_, gas_pricer_, conf_, kp_.secret()); - vote_mgr_ = std::make_shared(node_addr, conf_.genesis.pbft, kp_.secret(), conf_.vrf_secret, db_, - pbft_chain_, final_chain_, key_manager_, slashing_manager); + dag_mgr_ = std::make_shared(conf_, node_addr, trx_mgr_, pbft_chain_, final_chain_, db_, key_manager_); + auto slashing_manager = std::make_shared(conf_, final_chain_, trx_mgr_, gas_pricer_); + vote_mgr_ = std::make_shared(conf_, db_, pbft_chain_, final_chain_, key_manager_, slashing_manager); pillar_chain_mgr_ = std::make_shared(conf_.genesis.state.hardforks.ficus_hf, db_, final_chain_, key_manager_, node_addr); - pbft_mgr_ = std::make_shared(conf_.genesis, node_addr, db_, pbft_chain_, vote_mgr_, dag_mgr_, trx_mgr_, - final_chain_, pillar_chain_mgr_, kp_.secret()); - dag_block_proposer_ = std::make_shared( - conf_.genesis.dag.block_proposer, dag_mgr_, trx_mgr_, final_chain_, db_, key_manager_, node_addr, getSecretKey(), - getVrfSecretKey(), conf_.genesis.pbft.gas_limit, conf_.genesis.dag.gas_limit, conf_.genesis.state); + pbft_mgr_ = std::make_shared(conf_, db_, pbft_chain_, vote_mgr_, dag_mgr_, trx_mgr_, final_chain_, + pillar_chain_mgr_); + dag_block_proposer_ = std::make_shared(conf_, dag_mgr_, trx_mgr_, final_chain_, db_, key_manager_); network_ = std::make_shared(conf_, genesis_hash, conf_.net_file_path().string(), kp_, db_, pbft_mgr_, pbft_chain_, @@ -182,7 +173,7 @@ void FullNode::start() { // Inits rpc related members if (conf_.network.rpc) { - rpc_thread_pool_ = std::make_unique(conf_.network.rpc->threads_num); + rpc_thread_pool_ = std::make_shared(conf_.network.rpc->threads_num); net::rpc::eth::EthParams eth_rpc_params; eth_rpc_params.address = 
getAddress(); eth_rpc_params.chain_id = conf_.genesis.chain_id; @@ -214,10 +205,10 @@ void FullNode::start() { auto eth_json_rpc = net::rpc::eth::NewEth(std::move(eth_rpc_params)); std::shared_ptr test_json_rpc; - if (conf_.enable_test_rpc) { - // TODO Because this object refers to FullNode, the lifecycle/dependency management is more complicated); - test_json_rpc = std::make_shared(shared_from_this()); - } + // if (conf_.enable_test_rpc) { + // TODO Because this object refers to FullNode, the lifecycle/dependency management is more complicated); + test_json_rpc = std::make_shared(shared_from_this()); + //} std::shared_ptr debug_json_rpc; if (conf_.enable_debug) { @@ -235,16 +226,15 @@ void FullNode::start() { if (conf_.network.rpc->http_port) { auto json_rpc_processor = std::make_shared(); jsonrpc_http_ = std::make_shared( - rpc_thread_pool_->unsafe_get_io_context(), - boost::asio::ip::tcp::endpoint{conf_.network.rpc->address, *conf_.network.rpc->http_port}, getAddress(), - json_rpc_processor); + rpc_thread_pool_, boost::asio::ip::tcp::endpoint{conf_.network.rpc->address, *conf_.network.rpc->http_port}, + getAddress(), json_rpc_processor, conf_.network.rpc->max_pending_tasks); jsonrpc_api_->addConnector(json_rpc_processor); jsonrpc_http_->start(); } if (conf_.network.rpc->ws_port) { jsonrpc_ws_ = std::make_shared( - rpc_thread_pool_->unsafe_get_io_context(), - boost::asio::ip::tcp::endpoint{conf_.network.rpc->address, *conf_.network.rpc->ws_port}, getAddress()); + rpc_thread_pool_, boost::asio::ip::tcp::endpoint{conf_.network.rpc->address, *conf_.network.rpc->ws_port}, + getAddress(), conf_.network.rpc->max_pending_tasks); jsonrpc_api_->addConnector(jsonrpc_ws_); jsonrpc_ws_->run(); } @@ -281,7 +271,7 @@ void FullNode::start() { }, *rpc_thread_pool_); dag_mgr_->block_verified_.subscribe( - [eth_json_rpc = as_weak(eth_json_rpc), ws = as_weak(jsonrpc_ws_)](auto const &dag_block) { + [eth_json_rpc = as_weak(eth_json_rpc), ws = as_weak(jsonrpc_ws_)](const 
std::shared_ptr &dag_block) { if (auto _ws = ws.lock()) { _ws->newDagBlock(dag_block); } @@ -289,22 +279,23 @@ void FullNode::start() { *rpc_thread_pool_); } if (conf_.network.graphql) { - graphql_thread_pool_ = std::make_unique(conf_.network.graphql->threads_num); + graphql_thread_pool_ = std::make_shared(conf_.network.graphql->threads_num); if (conf_.network.graphql->ws_port) { graphql_ws_ = std::make_shared( - graphql_thread_pool_->unsafe_get_io_context(), - boost::asio::ip::tcp::endpoint{conf_.network.graphql->address, *conf_.network.graphql->ws_port}, - getAddress()); + graphql_thread_pool_, + boost::asio::ip::tcp::endpoint{conf_.network.graphql->address, *conf_.network.graphql->ws_port}, getAddress(), + conf_.network.rpc->max_pending_tasks); // graphql_ws_->run(); } if (conf_.network.graphql->http_port) { graphql_http_ = std::make_shared( - graphql_thread_pool_->unsafe_get_io_context(), + graphql_thread_pool_, boost::asio::ip::tcp::endpoint{conf_.network.graphql->address, *conf_.network.graphql->http_port}, getAddress(), std::make_shared(final_chain_, dag_mgr_, pbft_mgr_, trx_mgr_, db_, gas_pricer_, - as_weak(network_), conf_.genesis.chain_id)); + as_weak(network_), conf_.genesis.chain_id), + conf_.network.rpc->max_pending_tasks); graphql_http_->start(); } } @@ -423,7 +414,7 @@ void FullNode::rebuildDb() { LOG(log_nf_) << "Adding PBFT block " << period_data->pbft_blk->getBlockHash().toString() << " from old DB into syncing queue for processing, final chain size: " - << final_chain_->last_block_number(); + << final_chain_->lastBlockNumber(); pbft_mgr_->periodDataQueuePush(std::move(*period_data), dev::p2p::NodeID(), std::move(cert_votes)); pbft_mgr_->waitForPeriodFinalization(); diff --git a/libraries/core_libs/storage/include/storage/migration/final_chain_header.hpp b/libraries/core_libs/storage/include/storage/migration/final_chain_header.hpp new file mode 100644 index 0000000000..a7b2c111b1 --- /dev/null +++ 
b/libraries/core_libs/storage/include/storage/migration/final_chain_header.hpp @@ -0,0 +1,16 @@ +#pragma once +#include + +#include "storage/migration/migration_base.hpp" + +namespace taraxa::storage::migration { +class FinalChainHeader : public migration::Base { + public: + FinalChainHeader(std::shared_ptr db); + std::string id() override; + uint32_t dbVersion() override; + + protected: + void migrate(logger::Logger& log) override; +}; +} // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/include/storage/migration/migration_base.hpp b/libraries/core_libs/storage/include/storage/migration/migration_base.hpp index 5102d2541c..be2c20f433 100644 --- a/libraries/core_libs/storage/include/storage/migration/migration_base.hpp +++ b/libraries/core_libs/storage/include/storage/migration/migration_base.hpp @@ -10,7 +10,7 @@ class Base { // We need to specify version here, so in case of major version change(db reindex) we won't apply unneeded migrations virtual uint32_t dbVersion() = 0; - bool isApplied() { return db_->lookup_int(id(), DB::Columns::migrations).has_value(); } + bool isApplied() { return db_->lookup_int(id(), DbStorage::Columns::migrations).has_value(); } void apply(logger::Logger& log) { migrate(log); @@ -22,9 +22,9 @@ class Base { // Method with custom logic. 
All db changes should be made using `batch_` virtual void migrate(logger::Logger& log) = 0; - void setApplied() { db_->insert(batch_, DB::Columns::migrations, id(), true); } + void setApplied() { db_->insert(batch_, DbStorage::Columns::migrations, id(), true); } std::shared_ptr db_; - DB::Batch batch_; + Batch batch_; }; } // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/include/storage/migration/period_dag_blocks.hpp b/libraries/core_libs/storage/include/storage/migration/period_dag_blocks.hpp new file mode 100644 index 0000000000..0584717414 --- /dev/null +++ b/libraries/core_libs/storage/include/storage/migration/period_dag_blocks.hpp @@ -0,0 +1,12 @@ +#pragma once +#include "storage/migration/migration_base.hpp" + +namespace taraxa::storage::migration { +class PeriodDagBlocks : public migration::Base { + public: + PeriodDagBlocks(std::shared_ptr db); + std::string id() override; + uint32_t dbVersion() override; + void migrate(logger::Logger& log) override; +}; +} // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/include/storage/migration/transaction_period.hpp b/libraries/core_libs/storage/include/storage/migration/transaction_period.hpp index 4baf0a6ad7..b29786e785 100644 --- a/libraries/core_libs/storage/include/storage/migration/transaction_period.hpp +++ b/libraries/core_libs/storage/include/storage/migration/transaction_period.hpp @@ -1,8 +1,6 @@ #pragma once #include -#include "common/thread_pool.hpp" -#include "pbft/period_data.hpp" #include "storage/migration/migration_base.hpp" namespace taraxa::storage::migration { diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index 4f33fa06c5..47264c59f2 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -7,7 +7,7 @@ #include #include 
-#include +#include #include "common/types.hpp" #include "dag/dag_block.hpp" @@ -16,7 +16,6 @@ #include "pbft/period_data.hpp" #include "pillar_chain/pillar_block.hpp" #include "storage/uint_comparator.hpp" -#include "transaction/system_transaction.hpp" #include "transaction/transaction.hpp" #include "vote/pillar_vote.hpp" #include "vote_manager/verified_votes.hpp" @@ -53,6 +52,8 @@ enum PbftMgrStatus : uint8_t { NextVotedNullBlockHash, }; +enum class DBMetaKeys { LAST_NUMBER = 1 }; + class DbException : public std::exception { public: explicit DbException(const std::string& desc) : desc_(desc) {} @@ -69,12 +70,12 @@ class DbException : public std::exception { const std::string desc_; }; +using Batch = rocksdb::WriteBatch; +using Slice = rocksdb::Slice; +using OnEntry = std::function; + class DbStorage : public std::enable_shared_from_this { public: - using Slice = rocksdb::Slice; - using Batch = rocksdb::WriteBatch; - using OnEntry = std::function; - class Column { string const name_; @@ -224,6 +225,7 @@ class DbStorage : public std::enable_shared_from_this { void savePeriodData(const PeriodData& period_data, Batch& write_batch); void clearPeriodDataHistory(PbftPeriod period, uint64_t dag_level_to_keep); dev::bytes getPeriodDataRaw(PbftPeriod period) const; + std::optional getPeriodData(PbftPeriod period) const; std::optional getPbftBlock(PbftPeriod period) const; std::vector> getPeriodCertVotes(PbftPeriod period) const; blk_hash_t getPeriodBlockHash(PbftPeriod period) const; @@ -240,18 +242,18 @@ class DbStorage : public std::enable_shared_from_this { std::optional getCurrentPillarBlockData() const; // DAG - void saveDagBlock(DagBlock const& blk, Batch* write_batch_p = nullptr); + void saveDagBlock(const std::shared_ptr& blk, Batch* write_batch_p = nullptr); std::shared_ptr getDagBlock(blk_hash_t const& hash); bool dagBlockInDb(blk_hash_t const& hash); std::set getBlocksByLevel(level_t level); level_t getLastBlocksLevel() const; std::vector> 
getDagBlocksAtLevel(level_t level, int number_of_levels); - void updateDagBlockCounters(std::vector blks); - std::map> getNonfinalizedDagBlocks(); + void updateDagBlockCounters(std::vector> blks); + std::map>> getNonfinalizedDagBlocks(); void removeDagBlockBatch(Batch& write_batch, blk_hash_t const& hash); void removeDagBlock(blk_hash_t const& hash); // Sortition params - void saveSortitionParamsChange(PbftPeriod period, const SortitionParamsChange& params, DbStorage::Batch& batch); + void saveSortitionParamsChange(PbftPeriod period, const SortitionParamsChange& params, Batch& batch); std::deque getLastSortitionParams(size_t count); std::optional getParamsChangeForPeriod(PbftPeriod period); @@ -350,7 +352,7 @@ class DbStorage : public std::enable_shared_from_this { std::vector getFinalizedDagBlockHashesByPeriod(PbftPeriod period); std::vector> getFinalizedDagBlockByPeriod(PbftPeriod period); - std::pair>> getLastPbftblockHashAndFinalizedDagBlockByPeriod( + std::pair>> getLastPbftBlockHashAndFinalizedDagBlockByPeriod( PbftPeriod period); // DPOS level to proposal period map @@ -457,6 +459,11 @@ class DbStorage : public std::enable_shared_from_this { checkStatus(batch.Put(handle(col), toSlice(k), toSlice(v))); } + template + void insert(Batch& batch, rocksdb::ColumnFamilyHandle* col, K const& k, V const& v) { + checkStatus(batch.Put(col, toSlice(k), toSlice(v))); + } + template void remove(Column const& col, K const& k) { checkStatus(db_->Delete(write_options_, handle(col), toSlice(k))); @@ -470,6 +477,4 @@ class DbStorage : public std::enable_shared_from_this { void forEach(Column const& col, OnEntry const& f); }; -using DB = DbStorage; - } // namespace taraxa diff --git a/libraries/core_libs/storage/src/migration/final_chain_header.cpp b/libraries/core_libs/storage/src/migration/final_chain_header.cpp new file mode 100644 index 0000000000..0730e16bdb --- /dev/null +++ b/libraries/core_libs/storage/src/migration/final_chain_header.cpp @@ -0,0 +1,52 @@ +#include 
"storage/migration/final_chain_header.hpp" + +#include "final_chain/data.hpp" + +namespace taraxa::storage::migration { + +FinalChainHeader::FinalChainHeader(std::shared_ptr db) : migration::Base(db) {} + +std::string FinalChainHeader::id() { return "FinalChainHeader"; } + +uint32_t FinalChainHeader::dbVersion() { return 1; } + +struct OldHeader : final_chain::BlockHeader { + RLP_FIELDS_DEFINE_INPLACE(hash, parent_hash, author, state_root, transactions_root, receipts_root, log_bloom, number, + gas_limit, gas_used, timestamp, total_reward, extra_data) +}; + +void FinalChainHeader::migrate(logger::Logger& log) { + auto orig_col = DbStorage::Columns::final_chain_blk_by_number; + auto copied_col = db_->copyColumn(db_->handle(orig_col), orig_col.name() + "-copy"); + + if (copied_col == nullptr) { + LOG(log) << "Migration " << id() << " skipped: Unable to copy " << orig_col.name() << " column"; + return; + } + + auto it = db_->getColumnIterator(copied_col.get()); + it->SeekToFirst(); + if (!it->Valid()) { + LOG(log) << "No blocks to migrate"; + return; + } + + uint64_t batch_size = 500000000; + for (; it->Valid(); it->Next()) { + uint64_t period; + memcpy(&period, it->key().data(), sizeof(uint64_t)); + std::string raw = it->value().ToString(); + auto header = std::make_shared(); + header->rlp(dev::RLP(raw)); + auto newBytes = header->serializeForDB(); + db_->insert(batch_, copied_col.get(), period, newBytes); + if (batch_.GetDataSize() > batch_size) { + db_->commitWriteBatch(batch_); + } + } + // commit the left over batch + db_->commitWriteBatch(batch_); + + db_->replaceColumn(orig_col, std::move(copied_col)); +} +} // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/src/migration/migration_manager.cpp b/libraries/core_libs/storage/src/migration/migration_manager.cpp index a7d2ea6924..a226662ef6 100644 --- a/libraries/core_libs/storage/src/migration/migration_manager.cpp +++ 
b/libraries/core_libs/storage/src/migration/migration_manager.cpp @@ -1,9 +1,15 @@ #include "storage/migration/migration_manager.hpp" +#include "storage/migration/final_chain_header.hpp" +#include "storage/migration/period_dag_blocks.hpp" #include "storage/migration/transaction_period.hpp" - namespace taraxa::storage::migration { -Manager::Manager(std::shared_ptr db, const addr_t& node_addr) : db_(db) { LOG_OBJECTS_CREATE("MIGRATIONS"); } + +Manager::Manager(std::shared_ptr db, const addr_t& node_addr) : db_(db) { + registerMigration(); + registerMigration(); + LOG_OBJECTS_CREATE("MIGRATIONS"); +} void Manager::applyMigration(std::shared_ptr m) { if (m->isApplied()) { LOG(log_si_) << "Skip \"" << m->id() << "\" migration. It was already applied"; diff --git a/libraries/core_libs/storage/src/migration/period_dag_blocks.cpp b/libraries/core_libs/storage/src/migration/period_dag_blocks.cpp new file mode 100644 index 0000000000..9d3ed46281 --- /dev/null +++ b/libraries/core_libs/storage/src/migration/period_dag_blocks.cpp @@ -0,0 +1,65 @@ +#include "storage/migration/period_dag_blocks.hpp" + +#include + +#include + +#include "pbft/period_data.hpp" + +namespace taraxa::storage::migration { + +PeriodDagBlocks::PeriodDagBlocks(std::shared_ptr db) : migration::Base(db) {} + +std::string PeriodDagBlocks::id() { return "PeriodDagBlocks"; } + +uint32_t PeriodDagBlocks::dbVersion() { return 1; } + +void PeriodDagBlocks::migrate(logger::Logger& log) { + auto it = db_->getColumnIterator(DbStorage::Columns::period_data); + it->SeekToFirst(); + if (!it->Valid()) { + return; + } + + uint64_t start_period, end_period; + memcpy(&start_period, it->key().data(), sizeof(uint64_t)); + + it->SeekToLast(); + if (!it->Valid()) { + it->Prev(); + } + memcpy(&end_period, it->key().data(), sizeof(uint64_t)); + const auto diff = (end_period - start_period) ? 
(end_period - start_period) : 1; + + uint64_t curr_progress = 0; + auto batch = db_->createWriteBatch(); + const size_t max_size = 500000000; + + // Get and save data in new format for all blocks + for (uint64_t period = start_period; period <= end_period; period++) { + const auto bts = db_->getPeriodDataRaw(period); + const auto db_rlp = dev::RLP(bts); + auto percentage = (period - start_period) * 100 / diff; + if (percentage > curr_progress) { + curr_progress = percentage; + LOG(log) << "Migration " << id() << " progress " << curr_progress << "%"; + } + // If there are no dag blocks in the period, skip it + if (db_rlp.itemCount() > 2 && db_rlp[2].itemCount() == 0) { + continue; + } + // skip if the period data is already in the new format + try { + auto period_data = ::taraxa::PeriodData::FromOldPeriodData(db_rlp); + db_->insert(batch, DbStorage::Columns::period_data, period, period_data.rlp()); + } catch (const dev::RLPException& e) { + continue; + } + if (batch.GetDataSize() > max_size) { + db_->commitWriteBatch(batch); + } + } + db_->commitWriteBatch(batch); + db_->compactColumn(DbStorage::Columns::period_data); +} +} // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/src/migration/transaction_period.cpp b/libraries/core_libs/storage/src/migration/transaction_period.cpp index f0dd0a6248..d2d20f107b 100644 --- a/libraries/core_libs/storage/src/migration/transaction_period.cpp +++ b/libraries/core_libs/storage/src/migration/transaction_period.cpp @@ -2,7 +2,8 @@ #include -#include "pbft/pbft_manager.hpp" +#include "common/thread_pool.hpp" +#include "common/util.hpp" namespace taraxa::storage::migration { @@ -13,7 +14,7 @@ std::string TransactionPeriod::id() { return "TransactionPeriod"; } uint32_t TransactionPeriod::dbVersion() { return 1; } void TransactionPeriod::migrate(logger::Logger& log) { - auto it = db_->getColumnIterator(DB::Columns::period_data); + auto it = 
db_->getColumnIterator(DbStorage::Columns::period_data); it->SeekToFirst(); if (!it->Valid()) { return; diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index fc5d2c1a62..b2c0401e7b 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -7,11 +7,13 @@ #include #include "config/version.hpp" +#include "dag/dag_block_bundle_rlp.hpp" #include "dag/sortition_params_manager.hpp" -#include "final_chain/final_chain.hpp" +#include "final_chain/data.hpp" #include "pillar_chain/pillar_block.hpp" #include "rocksdb/utilities/checkpoint.h" #include "storage/uint_comparator.hpp" +#include "transaction/system_transaction.hpp" #include "vote/pbft_vote.hpp" #include "vote/votes_bundle_rlp.hpp" @@ -149,7 +151,7 @@ std::unique_ptr DbStorage::copyColumn(rocksdb::Colu return nullptr; } - rocksdb::Checkpoint* checkpoint_raw; + rocksdb::Checkpoint* checkpoint_raw = nullptr; auto status = rocksdb::Checkpoint::Create(db_.get(), &checkpoint_raw); std::unique_ptr checkpoint(checkpoint_raw); checkStatus(status); @@ -160,7 +162,7 @@ std::unique_ptr DbStorage::copyColumn(rocksdb::Colu // Export dir should not exist before exporting the column family fs::remove_all(export_dir); - rocksdb::ExportImportFilesMetaData* metadata_raw; + rocksdb::ExportImportFilesMetaData* metadata_raw = nullptr; status = checkpoint->ExportColumnFamily(orig_column, export_dir, &metadata_raw); std::unique_ptr metadata(metadata_raw); checkStatus(status); @@ -174,7 +176,7 @@ std::unique_ptr DbStorage::copyColumn(rocksdb::Colu rocksdb::ImportColumnFamilyOptions import_options; import_options.move_files = move_data; - rocksdb::ColumnFamilyHandle* copied_column_raw; + rocksdb::ColumnFamilyHandle* copied_column_raw = nullptr; status = db_->CreateColumnFamilyWithImport(options, new_col_name, import_options, *metadata, &copied_column_raw); std::unique_ptr copied_column(copied_column_raw); checkStatus(status); @@ -306,7 
+308,7 @@ bool DbStorage::createSnapshot(PbftPeriod period) { LOG(log_nf_) << "Creating DB snapshot on period: " << period; - // Create rocskd checkpoint/snapshot + // Create rocksdb checkpoint/snapshot rocksdb::Checkpoint* checkpoint; auto status = rocksdb::Checkpoint::Create(db_.get(), &checkpoint); // Scope is to delete checkpoint object as soon as we don't need it anymore @@ -398,7 +400,9 @@ std::optional DbStorage::getGenesisHash() { DbStorage::~DbStorage() { for (auto cf : handles_) { - checkStatus(db_->DestroyColumnFamilyHandle(cf)); + if (cf->GetName() != "default") { + checkStatus(db_->DestroyColumnFamilyHandle(cf)); + } } checkStatus(db_->Close()); } @@ -419,7 +423,7 @@ void DbStorage::checkStatus(rocksdb::Status const& status) { " SubCode: " + std::to_string(status.subcode()) + " Message:" + status.ToString()); } -DbStorage::Batch DbStorage::createWriteBatch() { return DbStorage::Batch(); } +Batch DbStorage::createWriteBatch() { return Batch(); } void DbStorage::commitWriteBatch(Batch& write_batch, rocksdb::WriteOptions const& opts) { auto status = db_->Write(opts, write_batch.GetWriteBatch()); @@ -438,7 +442,7 @@ std::shared_ptr DbStorage::getDagBlock(blk_hash_t const& hash) { if (period_data.size() > 0) { auto period_data_rlp = dev::RLP(period_data); auto dag_blocks_data = period_data_rlp[DAG_BLOCKS_POS_IN_PERIOD_DATA]; - return std::make_shared(dag_blocks_data[data->second]); + return decodeDAGBlockBundleRlp(data->second, dag_blocks_data); } } return nullptr; @@ -483,12 +487,12 @@ std::vector> DbStorage::getDagBlocksAtLevel(level_t le return res; } -std::map> DbStorage::getNonfinalizedDagBlocks() { - std::map> res; +std::map>> DbStorage::getNonfinalizedDagBlocks() { + std::map>> res; auto i = std::unique_ptr(db_->NewIterator(read_options_, handle(Columns::dag_blocks))); for (i->SeekToFirst(); i->Valid(); i->Next()) { - DagBlock block(asBytes(i->value().ToString())); - res[block.getLevel()].emplace_back(std::move(block)); + auto block = 
std::make_shared(asBytes(i->value().ToString())); + res[block->getLevel()].emplace_back(std::move(block)); } return res; } @@ -508,39 +512,39 @@ void DbStorage::removeDagBlockBatch(Batch& write_batch, blk_hash_t const& hash) void DbStorage::removeDagBlock(blk_hash_t const& hash) { remove(Columns::dag_blocks, toSlice(hash)); } -void DbStorage::updateDagBlockCounters(std::vector blks) { +void DbStorage::updateDagBlockCounters(std::vector> blks) { // Lock is needed since we are editing some fields std::lock_guard u_lock(dag_blocks_mutex_); auto write_batch = createWriteBatch(); for (auto const& blk : blks) { - auto level = blk.getLevel(); + auto level = blk->getLevel(); auto block_hashes = getBlocksByLevel(level); - block_hashes.emplace(blk.getHash()); + block_hashes.emplace(blk->getHash()); dev::RLPStream blocks_stream(block_hashes.size()); for (auto const& hash : block_hashes) { blocks_stream << hash; } insert(write_batch, Columns::dag_blocks_level, toSlice(level), toSlice(blocks_stream.out())); dag_blocks_count_.fetch_add(1); - dag_edge_count_.fetch_add(blk.getTips().size() + 1); + dag_edge_count_.fetch_add(blk->getTips().size() + 1); } insert(write_batch, Columns::status, toSlice((uint8_t)StatusDbField::DagBlkCount), toSlice(dag_blocks_count_.load())); insert(write_batch, Columns::status, toSlice((uint8_t)StatusDbField::DagEdgeCount), toSlice(dag_edge_count_.load())); commitWriteBatch(write_batch); } -void DbStorage::saveDagBlock(DagBlock const& blk, Batch* write_batch_p) { +void DbStorage::saveDagBlock(const std::shared_ptr& blk, Batch* write_batch_p) { // Lock is needed since we are editing some fields std::lock_guard u_lock(dag_blocks_mutex_); auto write_batch_up = write_batch_p ? std::unique_ptr() : std::make_unique(); auto commit = !write_batch_p; auto& write_batch = write_batch_p ? 
*write_batch_p : *write_batch_up; - auto block_bytes = blk.rlp(true); - auto block_hash = blk.getHash(); + auto block_bytes = blk->rlp(true); + auto block_hash = blk->getHash(); insert(write_batch, Columns::dag_blocks, toSlice(block_hash.asBytes()), toSlice(block_bytes)); - auto level = blk.getLevel(); + auto level = blk->getLevel(); auto block_hashes = getBlocksByLevel(level); - block_hashes.emplace(blk.getHash()); + block_hashes.emplace(blk->getHash()); dev::RLPStream blocks_stream(block_hashes.size()); for (auto const& hash : block_hashes) { blocks_stream << hash; @@ -548,7 +552,7 @@ void DbStorage::saveDagBlock(DagBlock const& blk, Batch* write_batch_p) { insert(write_batch, Columns::dag_blocks_level, toSlice(level), toSlice(blocks_stream.out())); dag_blocks_count_.fetch_add(1); - dag_edge_count_.fetch_add(blk.getTips().size() + 1); + dag_edge_count_.fetch_add(blk->getTips().size() + 1); insert(write_batch, Columns::status, toSlice((uint8_t)StatusDbField::DagBlkCount), toSlice(dag_blocks_count_.load())); insert(write_batch, Columns::status, toSlice((uint8_t)StatusDbField::DagEdgeCount), toSlice(dag_edge_count_.load())); if (commit) { @@ -615,7 +619,7 @@ void DbStorage::clearPeriodDataHistory(PbftPeriod end_period, uint64_t dag_level for (auto period = start_period; period < end_period; period++) { // Find transactions included in the old blocks and delete data related to these transactions to free // disk space - const auto& [pbft_block_hash, dag_blocks] = getLastPbftblockHashAndFinalizedDagBlockByPeriod(period); + const auto& [pbft_block_hash, dag_blocks] = getLastPbftBlockHashAndFinalizedDagBlockByPeriod(period); for (const auto& dag_block : dag_blocks) { for (const auto& trx_hash : dag_block->getTrxs()) { @@ -682,8 +686,8 @@ void DbStorage::savePeriodData(const PeriodData& period_data, Batch& write_batch // Remove dag blocks from non finalized column in db and add dag_block_period in DB uint32_t block_pos = 0; for (auto const& block : period_data.dag_blocks) 
{ - removeDagBlockBatch(write_batch, block.getHash()); - addDagBlockPeriodToBatch(block.getHash(), period, block_pos, write_batch); + removeDagBlockBatch(write_batch, block->getHash()); + addDagBlockPeriodToBatch(block->getHash(), period, block_pos, write_batch); block_pos++; } @@ -702,6 +706,15 @@ dev::bytes DbStorage::getPeriodDataRaw(PbftPeriod period) const { return asBytes(lookup(toSlice(period), Columns::period_data)); } +std::optional DbStorage::getPeriodData(PbftPeriod period) const { + auto period_data_bytes = getPeriodDataRaw(period); + if (period_data_bytes.empty()) { + return {}; + } + + return PeriodData{std::move(period_data_bytes)}; +} + void DbStorage::savePillarBlock(const std::shared_ptr& pillar_block) { insert(Columns::pillar_block, pillar_block->getPeriod(), pillar_block->getRlp()); } @@ -1246,41 +1259,38 @@ std::vector DbStorage::getFinalizedDagBlockHashesByPeriod(PbftPeriod std::vector ret; if (auto period_data = getPeriodDataRaw(period); period_data.size() > 0) { auto dag_blocks_data = dev::RLP(period_data)[DAG_BLOCKS_POS_IN_PERIOD_DATA]; - ret.reserve(dag_blocks_data.size()); - std::transform(dag_blocks_data.begin(), dag_blocks_data.end(), std::back_inserter(ret), - [](const auto& dag_block) { return DagBlock(dag_block).getHash(); }); + const auto dag_blocks = decodeDAGBlocksBundleRlp(dag_blocks_data); + ret.reserve(dag_blocks.size()); + std::transform(dag_blocks.begin(), dag_blocks.end(), std::back_inserter(ret), + [](const auto& dag_block) { return dag_block->getHash(); }); } return ret; } std::vector> DbStorage::getFinalizedDagBlockByPeriod(PbftPeriod period) { - std::vector> ret; - if (auto period_data = getPeriodDataRaw(period); period_data.size() > 0) { - auto dag_blocks_data = dev::RLP(period_data)[DAG_BLOCKS_POS_IN_PERIOD_DATA]; - ret.reserve(dag_blocks_data.size()); - for (auto const block : dag_blocks_data) { - ret.emplace_back(std::make_shared(block)); - } + auto period_data = getPeriodDataRaw(period); + if (period_data.empty()) { 
+ return {}; } - return ret; + + auto dag_blocks_data = dev::RLP(period_data)[DAG_BLOCKS_POS_IN_PERIOD_DATA]; + return decodeDAGBlocksBundleRlp(dag_blocks_data); } std::pair>> -DbStorage::getLastPbftblockHashAndFinalizedDagBlockByPeriod(PbftPeriod period) { - std::vector> ret; - blk_hash_t last_pbft_block_hash; - if (auto period_data = getPeriodDataRaw(period); period_data.size() > 0) { - auto const period_data_rlp = dev::RLP(period_data); - auto dag_blocks_data = period_data_rlp[DAG_BLOCKS_POS_IN_PERIOD_DATA]; - ret.reserve(dag_blocks_data.size()); - for (auto const block : dag_blocks_data) { - ret.emplace_back(std::make_shared(block)); - } - last_pbft_block_hash = - period_data_rlp[PBFT_BLOCK_POS_IN_PERIOD_DATA][PREV_BLOCK_HASH_POS_IN_PBFT_BLOCK].toHash(); +DbStorage::getLastPbftBlockHashAndFinalizedDagBlockByPeriod(PbftPeriod period) { + auto period_data = getPeriodDataRaw(period); + if (period_data.empty()) { + return {}; } - return {last_pbft_block_hash, ret}; + + const auto period_data_rlp = dev::RLP(period_data); + auto dag_blocks_data = period_data_rlp[DAG_BLOCKS_POS_IN_PERIOD_DATA]; + auto blocks = decodeDAGBlocksBundleRlp(dag_blocks_data); + auto last_pbft_block_hash = + period_data_rlp[PBFT_BLOCK_POS_IN_PERIOD_DATA][PREV_BLOCK_HASH_POS_IN_PBFT_BLOCK].toHash(); + return {last_pbft_block_hash, std::move(blocks)}; } std::optional DbStorage::getProposalPeriodForDagLevel(uint64_t level) { diff --git a/libraries/logger/include/logger/logger.hpp b/libraries/logger/include/logger/logger.hpp index 380420bb31..bfb2ef212e 100644 --- a/libraries/logger/include/logger/logger.hpp +++ b/libraries/logger/include/logger/logger.hpp @@ -3,7 +3,6 @@ #include #include -#include "common/types.hpp" #include "logger/logger_config.hpp" namespace taraxa::logger { @@ -66,14 +65,6 @@ void InitLogging(Config& logging_config, const addr_t& node_id); mutable taraxa::logger::Logger log_dg_; \ mutable taraxa::logger::Logger log_tr_; -#define LOG_OBJECTS_DEFINE_SUB(group) \ - mutable 
taraxa::logger::Logger log_si_##group##_; \ - mutable taraxa::logger::Logger log_er_##group##_; \ - mutable taraxa::logger::Logger log_wr_##group##_; \ - mutable taraxa::logger::Logger log_nf_##group##_; \ - mutable taraxa::logger::Logger log_dg_##group##_; \ - mutable taraxa::logger::Logger log_tr_##group##_; - #define LOG_OBJECTS_CREATE(channel) \ log_si_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Silent, channel, node_addr); \ log_er_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Error, channel, node_addr); \ @@ -81,11 +72,3 @@ void InitLogging(Config& logging_config, const addr_t& node_id); log_nf_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Info, channel, node_addr); \ log_tr_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Trace, channel, node_addr); \ log_dg_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Debug, channel, node_addr); - -#define LOG_OBJECTS_CREATE_SUB(channel, group) \ - log_si_##group##_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Silent, channel, node_addr); \ - log_er_##group##_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Error, channel, node_addr); \ - log_wr_##group##_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Warning, channel, node_addr); \ - log_nf_##group##_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Info, channel, node_addr); \ - log_tr_##group##_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Trace, channel, node_addr); \ - log_dg_##group##_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Debug, channel, node_addr); diff --git a/libraries/logger/include/logger/logger_config.hpp b/libraries/logger/include/logger/logger_config.hpp index 4330c9f3c0..5df90de654 100644 --- a/libraries/logger/include/logger/logger_config.hpp +++ b/libraries/logger/include/logger/logger_config.hpp @@ -3,16 +3,17 @@ #include #include #include -#include +#include #include -#include #include "common/types.hpp" +namespace 
fs = std::filesystem; + namespace taraxa::logger { // Logger verbosity -// this enum must match enum in aleth logs to corectly support aleths library +// this enum must match enum in aleth logs to correctly support aleth's library enum Verbosity { Silent = -1, Error = 0, diff --git a/libraries/metrics/src/metrics_service.cpp b/libraries/metrics/src/metrics_service.cpp index 9e7ca0989a..a6f3e24c74 100644 --- a/libraries/metrics/src/metrics_service.cpp +++ b/libraries/metrics/src/metrics_service.cpp @@ -1,8 +1,9 @@ #include "metrics/metrics_service.hpp" -#include +#include +#include -#include +#include #include #include diff --git a/libraries/types/dag_block/CMakeLists.txt b/libraries/types/dag_block/CMakeLists.txt index c3f2271ca6..95192fed27 100644 --- a/libraries/types/dag_block/CMakeLists.txt +++ b/libraries/types/dag_block/CMakeLists.txt @@ -1,8 +1,10 @@ set(HEADERS include/dag/dag_block.hpp + include/dag/dag_block_bundle_rlp.hpp ) set(SOURCES src/dag_block.cpp + src/dag_block_bundle_rlp.cpp ) add_library(dag_block ${SOURCES} ${HEADERS}) diff --git a/libraries/types/dag_block/include/dag/dag_block.hpp b/libraries/types/dag_block/include/dag/dag_block.hpp index 91fa27fb58..55ccd4dd7e 100644 --- a/libraries/types/dag_block/include/dag/dag_block.hpp +++ b/libraries/types/dag_block/include/dag/dag_block.hpp @@ -1,6 +1,7 @@ #pragma once #include "common/default_construct_copyable_movable.hpp" +#include "common/encoding_rlp.hpp" #include "vdf/sortition.hpp" namespace taraxa { @@ -50,6 +51,7 @@ class DagBlock { explicit DagBlock(Json::Value const &doc); explicit DagBlock(string const &json); explicit DagBlock(dev::RLP const &_rlp); + explicit DagBlock(dev::RLP const &_rlp, vec_trx_t &&trxs); explicit DagBlock(dev::bytes const &_rlp) : DagBlock(dev::RLP(_rlp)) {} /** @@ -102,7 +104,7 @@ class DagBlock { bool verifySig() const; void verifyVdf(const SortitionParams &vdf_config, const h256 &proposal_period_hash, const vrf_wrapper::vrf_pk_t &pk, uint64_t vote_count, 
uint64_t total_vote_count) const; - bytes rlp(bool include_sig) const; + bytes rlp(bool include_sig, bool include_trxs = true) const; /** * @brief Returns dag block data rlp stream @@ -110,7 +112,9 @@ class DagBlock { * @param include_sig * @return dev::RLPStream */ - dev::RLPStream streamRLP(bool include_sig) const; + dev::RLPStream streamRLP(bool include_sig, bool include_trxs = true) const; + + HAS_RLP_FIELDS private: blk_hash_t sha3(bool include_sig) const; diff --git a/libraries/types/dag_block/include/dag/dag_block_bundle_rlp.hpp b/libraries/types/dag_block/include/dag/dag_block_bundle_rlp.hpp new file mode 100644 index 0000000000..21b6be4135 --- /dev/null +++ b/libraries/types/dag_block/include/dag/dag_block_bundle_rlp.hpp @@ -0,0 +1,44 @@ +#pragma once + +#include +#include + +#include + +namespace taraxa { + +class DagBlock; + +/** @addtogroup DAG + * @{ + */ + +constexpr static size_t kDAGBlocksBundleRlpSize{3}; + +/** + * @brief Encodes dag blocks into optimized blocks bundle rlp + * + * @param blocks + * @return blocks bundle rlp bytes + */ +dev::bytes encodeDAGBlocksBundleRlp(const std::vector>& blocks); + +/** + * @brief Decodes dag blocks from optimized blocks bundle rlp + * + * @param blocks_bundle_rlp + * @return blocks + */ +std::vector> decodeDAGBlocksBundleRlp(const dev::RLP& blocks_bundle_rlp); + +/** + * @brief Decodes single dag block from optimized blocks bundle rlp + * + * @param blocks_bundle_rlp + * @return block + */ +std::shared_ptr decodeDAGBlockBundleRlp(uint64_t index, const dev::RLP& blocks_bundle_rlp); + +/** @}*/ + +} // namespace taraxa diff --git a/libraries/types/dag_block/src/dag_block.cpp b/libraries/types/dag_block/src/dag_block.cpp index d28605f988..c9620aa3b4 100644 --- a/libraries/types/dag_block/src/dag_block.cpp +++ b/libraries/types/dag_block/src/dag_block.cpp @@ -7,11 +7,11 @@ #include +#include "common/encoding_rlp.hpp" #include "common/util.hpp" namespace taraxa { -using std::to_string; using
vrf_wrapper::VrfSortitionBase; DagBlock::DagBlock(blk_hash_t pivot, level_t level, vec_blk_t tips, vec_trx_t trxs, uint64_t est, sig_t sig, @@ -75,6 +75,13 @@ DagBlock::DagBlock(dev::RLP const &rlp) { vdf_ = vdf_sortition::VdfSortition(vdf_bytes); } +DagBlock::DagBlock(dev::RLP const &rlp, vec_trx_t &&trxs) { + dev::bytes vdf_bytes; + util::rlp_tuple(util::RLPDecoderRef(rlp, true), pivot_, level_, timestamp_, vdf_bytes, tips_, sig_, gas_estimation_); + vdf_ = vdf_sortition::VdfSortition(vdf_bytes); + trxs_ = std::move(trxs); +} + level_t DagBlock::extract_dag_level_from_rlp(const dev::RLP &rlp) { return rlp[kLevelPosInRlp].toInt(); } sig_t DagBlock::extract_signature_from_rlp(const dev::RLP &rlp) { return rlp[kSigPosInRlp].toHash(); } @@ -158,17 +165,26 @@ addr_t const &DagBlock::getSender() const { return cached_sender_; } -dev::RLPStream DagBlock::streamRLP(bool include_sig) const { +dev::RLPStream DagBlock::streamRLP(bool include_sig, bool include_trxs) const { dev::RLPStream s; - constexpr auto base_field_count = 7; - s.appendList(include_sig ? 
base_field_count + 1 : base_field_count); + auto base_field_count = 6; + if (include_sig) { + base_field_count += 1; + } + if (include_trxs) { + base_field_count += 1; + } + + s.appendList(base_field_count); s << pivot_; s << level_; s << timestamp_; s << vdf_.rlp(); s.appendVector(tips_); - s.appendVector(trxs_); + if (include_trxs) { + s.appendVector(trxs_); + } if (include_sig) { s << sig_; } @@ -177,8 +193,14 @@ dev::RLPStream DagBlock::streamRLP(bool include_sig) const { return s; } -bytes DagBlock::rlp(bool include_sig) const { return streamRLP(include_sig).invalidate(); } +bytes DagBlock::rlp(bool include_sig, bool include_trxs) const { + return streamRLP(include_sig, include_trxs).invalidate(); +} blk_hash_t DagBlock::sha3(bool include_sig) const { return dev::sha3(rlp(include_sig)); } +void DagBlock::rlp(::taraxa::util::RLPDecoderRef encoding) { *this = DagBlock(encoding.value); } + +void DagBlock::rlp(::taraxa::util::RLPEncoderRef encoding) const { encoding.appendRaw(rlp(true)); } + } // namespace taraxa diff --git a/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp new file mode 100644 index 0000000000..57831ccea6 --- /dev/null +++ b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp @@ -0,0 +1,107 @@ +#include "dag/dag_block_bundle_rlp.hpp" + +#include + +#include "common/types.hpp" +#include "dag/dag_block.hpp" + +namespace taraxa { + +dev::bytes encodeDAGBlocksBundleRlp(const std::vector>& blocks) { + if (blocks.empty()) { + return {}; + } + + std::unordered_map trx_hash_map; // Map to store transaction hash and its index + std::vector ordered_trx_hashes; + std::vector> indexes; + + for (const auto& block : blocks) { + std::vector idx; + idx.reserve(block->getTrxs().size()); + + for (const auto& trx : block->getTrxs()) { + if (const auto [_, ok] = trx_hash_map.try_emplace(trx, static_cast(trx_hash_map.size())); ok) { + ordered_trx_hashes.push_back(trx); // Track the insertion order + } 
+ idx.push_back(trx_hash_map[trx]); + } + indexes.push_back(idx); + } + + dev::RLPStream blocks_bundle_rlp(kDAGBlocksBundleRlpSize); + blocks_bundle_rlp.appendList(ordered_trx_hashes.size()); + for (const auto& trx_hash : ordered_trx_hashes) { + blocks_bundle_rlp.append(trx_hash); + } + blocks_bundle_rlp.appendList(indexes.size()); + for (const auto& idx : indexes) { + blocks_bundle_rlp.appendList(idx.size()); + for (const auto& i : idx) { + blocks_bundle_rlp.append(i); + } + } + blocks_bundle_rlp.appendList(blocks.size()); + for (const auto& block : blocks) { + blocks_bundle_rlp.appendRaw(block->rlp(true, false)); + } + return blocks_bundle_rlp.invalidate(); +} + +std::vector> decodeDAGBlocksBundleRlp(const dev::RLP& blocks_bundle_rlp) { + if (blocks_bundle_rlp.itemCount() != kDAGBlocksBundleRlpSize) { + return {}; + } + + std::vector ordered_trx_hashes; + std::vector> dags_trx_hashes; + + // Decode transaction hashes and their original order + ordered_trx_hashes.reserve(blocks_bundle_rlp[0].itemCount()); + std::transform(blocks_bundle_rlp[0].begin(), blocks_bundle_rlp[0].end(), std::back_inserter(ordered_trx_hashes), + [](const auto& trx_hash_rlp) { return trx_hash_rlp.template toHash(); }); + + for (const auto idx_rlp : blocks_bundle_rlp[1]) { + std::vector hashes; + hashes.reserve(idx_rlp.itemCount()); + std::transform(idx_rlp.begin(), idx_rlp.end(), std::back_inserter(hashes), + [&ordered_trx_hashes](const auto& i) { return ordered_trx_hashes[i.template toInt()]; }); + + dags_trx_hashes.push_back(std::move(hashes)); + } + + std::vector> blocks; + blocks.reserve(blocks_bundle_rlp[2].itemCount()); + + for (size_t i = 0; i < blocks_bundle_rlp[2].itemCount(); i++) { + auto block = std::make_shared(blocks_bundle_rlp[2][i], std::move(dags_trx_hashes[i])); + blocks.push_back(std::move(block)); + } + + return blocks; +} + +std::shared_ptr decodeDAGBlockBundleRlp(uint64_t index, const dev::RLP& blocks_bundle_rlp) { + if (blocks_bundle_rlp.itemCount() != kDAGBlocksBundleRlpSize) { + return
{}; + } + if (index >= blocks_bundle_rlp[2].itemCount()) { + return {}; + } + + std::vector ordered_trx_hashes; + ordered_trx_hashes.reserve(blocks_bundle_rlp[0].itemCount()); + std::transform(blocks_bundle_rlp[0].begin(), blocks_bundle_rlp[0].end(), std::back_inserter(ordered_trx_hashes), + [](const auto& trx_hash_rlp) { return trx_hash_rlp.template toHash(); }); + + const auto idx_rlp = blocks_bundle_rlp[1][index]; + std::vector hashes; + hashes.reserve(idx_rlp.itemCount()); + std::transform(idx_rlp.begin(), idx_rlp.end(), std::back_inserter(hashes), + [&ordered_trx_hashes](const auto& i) { return ordered_trx_hashes[i.template toInt()]; }); + return std::make_shared(blocks_bundle_rlp[2][index], std::move(hashes)); +} + +/** @}*/ + +} // namespace taraxa diff --git a/libraries/types/pbft_block/include/pbft/pbft_block.hpp b/libraries/types/pbft_block/include/pbft/pbft_block.hpp index 42e5a7b203..9a7640fb58 100644 --- a/libraries/types/pbft_block/include/pbft/pbft_block.hpp +++ b/libraries/types/pbft_block/include/pbft/pbft_block.hpp @@ -5,10 +5,9 @@ #include #include +#include "common/encoding_rlp.hpp" #include "common/types.hpp" -#include "dag/dag_block.hpp" #include "pbft_block_extra_data.hpp" -#include "vote/pbft_vote.hpp" namespace taraxa { @@ -21,21 +20,10 @@ namespace taraxa { * hash, DAG blocks ordering hash, period number, timestamp, proposer address, and proposer signature. 
*/ class PbftBlock { - blk_hash_t block_hash_; - blk_hash_t prev_block_hash_; - blk_hash_t dag_block_hash_as_pivot_; - blk_hash_t order_hash_; - blk_hash_t prev_state_root_hash_; - PbftPeriod period_; // Block index, PBFT head block is period 0, first PBFT block is period 1 - uint64_t timestamp_; - addr_t beneficiary_; - sig_t signature_; - std::vector reward_votes_; // Cert votes in previous period - std::optional extra_data_; - public: + PbftBlock() = default; PbftBlock(const blk_hash_t& prev_blk_hash, const blk_hash_t& dag_blk_hash_as_pivot, const blk_hash_t& order_hash, - const blk_hash_t& prev_state_root, PbftPeriod period, const addr_t& beneficiary, const secret_t& sk, + const blk_hash_t& final_chain_hash, PbftPeriod period, const addr_t& beneficiary, const secret_t& sk, std::vector&& reward_votes, const std::optional& extra_data = {}); explicit PbftBlock(const dev::RLP& rlp); explicit PbftBlock(const bytes& RLP); @@ -105,7 +93,11 @@ class PbftBlock { */ const auto& getOrderHash() const { return order_hash_; } - const auto& getPrevStateRoot() const { return prev_state_root_hash_; } + /** + * @brief Get final chain hash to tie final chain to the PBFT chain + * @return final chain hash + */ + const auto& getFinalChainHash() const { return final_chain_hash_; } /** * @brief Get period number @@ -139,6 +131,8 @@ class PbftBlock { const auto& getRewardVotes() const { return reward_votes_; } + HAS_RLP_FIELDS + private: /** * @brief Set PBFT block hash and block proposer address @@ -150,6 +144,19 @@ class PbftBlock { * */ void checkUniqueRewardVotes(); + + private: + blk_hash_t block_hash_; + blk_hash_t prev_block_hash_; + blk_hash_t dag_block_hash_as_pivot_; + blk_hash_t order_hash_; + blk_hash_t final_chain_hash_; + PbftPeriod period_; // Block index, PBFT head block is period 0, first PBFT block is period 1 + uint64_t timestamp_; + addr_t beneficiary_; + sig_t signature_; + std::vector reward_votes_; // Cert votes in previous period + std::optional extra_data_; }; 
std::ostream& operator<<(std::ostream& strm, const PbftBlock& pbft_blk); diff --git a/libraries/types/pbft_block/include/pbft/pbft_block_extra_data.hpp b/libraries/types/pbft_block/include/pbft/pbft_block_extra_data.hpp index 765069b45d..2e1bc4db20 100644 --- a/libraries/types/pbft_block/include/pbft/pbft_block_extra_data.hpp +++ b/libraries/types/pbft_block/include/pbft/pbft_block_extra_data.hpp @@ -1,13 +1,12 @@ #pragma once +#include #include #include #include #include #include "common/types.hpp" -#include "dag/dag_block.hpp" -#include "vote/vote.hpp" namespace taraxa { diff --git a/libraries/types/pbft_block/include/pbft/period_data.hpp b/libraries/types/pbft_block/include/pbft/period_data.hpp index 5ea23e863d..f1eb17282b 100644 --- a/libraries/types/pbft_block/include/pbft/period_data.hpp +++ b/libraries/types/pbft_block/include/pbft/period_data.hpp @@ -5,6 +5,7 @@ #include +#include "common/encoding_rlp.hpp" #include "common/types.hpp" #include "dag/dag_block.hpp" #include "transaction/transaction.hpp" @@ -29,12 +30,15 @@ class PeriodData { const std::vector>& previous_block_cert_votes, std::optional>>&& pillar_votes = {}); explicit PeriodData(const dev::RLP& all_rlp); - explicit PeriodData(bytes const& all_rlp); + explicit PeriodData(const bytes& all_rlp); + + static PeriodData FromOldPeriodData(const dev::RLP& rlp); + static bytes ToOldPeriodData(const bytes& rlp); std::shared_ptr pbft_blk; std::vector> previous_block_cert_votes; // These votes are the cert votes of previous block // which match reward votes in current pbft block - std::vector dag_blocks; + std::vector> dag_blocks; SharedTransactions transactions; // Pillar votes should be present only if pbft block contains also pillar block hash @@ -56,6 +60,8 @@ class PeriodData { * @brief Clear PBFT block, certify votes, DAG blocks, and transactions */ void clear(); + + HAS_RLP_FIELDS }; std::ostream& operator<<(std::ostream& strm, PeriodData const& b); diff --git 
a/libraries/types/pbft_block/src/pbft_block.cpp b/libraries/types/pbft_block/src/pbft_block.cpp index e0deec66e0..48e4f91461 100644 --- a/libraries/types/pbft_block/src/pbft_block.cpp +++ b/libraries/types/pbft_block/src/pbft_block.cpp @@ -4,6 +4,9 @@ #include +#include "common/encoding_rlp.hpp" +#include "common/util.hpp" + namespace taraxa { PbftBlock::PbftBlock(bytes const& b) : PbftBlock(dev::RLP(b)) {} @@ -12,11 +15,11 @@ PbftBlock::PbftBlock(dev::RLP const& rlp) { if (rlp.itemCount() == 9) { dev::bytes extra_data_bytes; util::rlp_tuple(util::RLPDecoderRef(rlp, true), prev_block_hash_, dag_block_hash_as_pivot_, order_hash_, - prev_state_root_hash_, period_, timestamp_, reward_votes_, extra_data_bytes, signature_); + final_chain_hash_, period_, timestamp_, reward_votes_, extra_data_bytes, signature_); extra_data_ = PbftBlockExtraData::fromBytes(extra_data_bytes); } else { util::rlp_tuple(util::RLPDecoderRef(rlp, true), prev_block_hash_, dag_block_hash_as_pivot_, order_hash_, - prev_state_root_hash_, period_, timestamp_, reward_votes_, signature_); + final_chain_hash_, period_, timestamp_, reward_votes_, signature_); } calculateHash_(); @@ -24,13 +27,13 @@ PbftBlock::PbftBlock(dev::RLP const& rlp) { } PbftBlock::PbftBlock(const blk_hash_t& prev_blk_hash, const blk_hash_t& dag_blk_hash_as_pivot, - const blk_hash_t& order_hash, const blk_hash_t& prev_state_root, PbftPeriod period, + const blk_hash_t& order_hash, const blk_hash_t& final_chain_hash, PbftPeriod period, const addr_t& beneficiary, const secret_t& sk, std::vector&& reward_votes, const std::optional& extra_data) : prev_block_hash_(prev_blk_hash), dag_block_hash_as_pivot_(dag_blk_hash_as_pivot), order_hash_(order_hash), - prev_state_root_hash_(prev_state_root), + final_chain_hash_(final_chain_hash), period_(period), beneficiary_(beneficiary), reward_votes_(reward_votes), @@ -56,7 +59,7 @@ void PbftBlock::calculateHash_() { if (!block_hash_) { block_hash_ = dev::sha3(rlp(true)); } else { - // Hash sould 
only be calculated once + // Hash should only be calculated once assert(false); } auto p = dev::recover(signature_, sha3(false)); @@ -84,7 +87,7 @@ Json::Value PbftBlock::getJson() const { json["prev_block_hash"] = prev_block_hash_.toString(); json["dag_block_hash_as_pivot"] = dag_block_hash_as_pivot_.toString(); json["order_hash"] = order_hash_.toString(); - json["prev_state_root_hash"] = prev_state_root_hash_.toString(); + json["final_chain_hash"] = final_chain_hash_.toString(); json["period"] = (Json::Value::UInt64)period_; json["timestamp"] = (Json::Value::UInt64)timestamp_; json["block_hash"] = block_hash_.toString(); @@ -107,7 +110,7 @@ void PbftBlock::streamRLP(dev::RLPStream& strm, bool include_sig) const { strm << prev_block_hash_; strm << dag_block_hash_as_pivot_; strm << order_hash_; - strm << prev_state_root_hash_; + strm << final_chain_hash_; strm << period_; strm << timestamp_; strm.appendVector(reward_votes_); @@ -126,6 +129,10 @@ bytes PbftBlock::rlp(bool include_sig) const { return strm.invalidate(); } +void PbftBlock::rlp(::taraxa::util::RLPDecoderRef encoding) { *this = PbftBlock(encoding.value); } + +void PbftBlock::rlp(::taraxa::util::RLPEncoderRef encoding) const { encoding.appendRaw(rlp(true)); } + std::ostream& operator<<(std::ostream& strm, PbftBlock const& pbft_blk) { strm << pbft_blk.getJsonStr(); return strm; diff --git a/libraries/types/pbft_block/src/pbft_block_extra_data.cpp b/libraries/types/pbft_block/src/pbft_block_extra_data.cpp index 6a14c99c61..1ded6113dd 100644 --- a/libraries/types/pbft_block/src/pbft_block_extra_data.cpp +++ b/libraries/types/pbft_block/src/pbft_block_extra_data.cpp @@ -1,5 +1,7 @@ #include "pbft/pbft_block_extra_data.hpp" +#include "common/encoding_rlp.hpp" + namespace taraxa { PbftBlockExtraData::PbftBlockExtraData(const uint16_t major_version, const uint16_t minor_version, diff --git a/libraries/types/pbft_block/src/period_data.cpp b/libraries/types/pbft_block/src/period_data.cpp index 
8bb8ba795a..7f9a09572b 100644 --- a/libraries/types/pbft_block/src/period_data.cpp +++ b/libraries/types/pbft_block/src/period_data.cpp @@ -1,11 +1,7 @@ #include "pbft/period_data.hpp" -#include - -#include "dag/dag_block.hpp" +#include "dag/dag_block_bundle_rlp.hpp" #include "pbft/pbft_block.hpp" -#include "transaction/transaction.hpp" -#include "vote/pbft_vote.hpp" #include "vote/votes_bundle_rlp.hpp" namespace taraxa { @@ -28,9 +24,8 @@ PeriodData::PeriodData(const dev::RLP& rlp) { previous_block_cert_votes = decodePbftVotesBundleRlp(votes_bundle_rlp); } - for (auto const dag_block_rlp : *it++) { - dag_blocks.emplace_back(dag_block_rlp); - } + const auto block_bundle_rlp = *it++; + dag_blocks = decodeDAGBlocksBundleRlp(block_bundle_rlp); for (auto const trx_rlp : *it++) { transactions.emplace_back(std::make_shared(trx_rlp)); @@ -55,9 +50,10 @@ bytes PeriodData::rlp() const { s.append(""); } - s.appendList(dag_blocks.size()); - for (auto const& b : dag_blocks) { - s.appendRaw(b.rlp(true)); + if (dag_blocks.empty()) { + s.append(""); + } else { + s.appendRaw(encodeDAGBlocksBundleRlp(dag_blocks)); } s.appendList(transactions.size()); @@ -81,6 +77,65 @@ void PeriodData::clear() { pillar_votes_.reset(); } +PeriodData PeriodData::FromOldPeriodData(const dev::RLP& rlp) { + PeriodData period_data; + auto it = rlp.begin(); + period_data.pbft_blk = std::make_shared(*it++); + + const auto votes_bundle_rlp = *it++; + if (period_data.pbft_blk->getPeriod() > 1) [[likely]] { + period_data.previous_block_cert_votes = decodePbftVotesBundleRlp(votes_bundle_rlp); + } + + for (auto const dag_block_rlp : *it++) { + period_data.dag_blocks.emplace_back(std::make_shared(dag_block_rlp)); + } + + for (auto const trx_rlp : *it++) { + period_data.transactions.emplace_back(std::make_shared(trx_rlp)); + } + + // Pillar votes are optional data of period data since ficus hardfork + if (rlp.itemCount() == 5) { + period_data.pillar_votes_ = decodePillarVotesBundleRlp(*it); + } + return 
period_data; +} + +bytes PeriodData::ToOldPeriodData(const bytes& rlp) { + PeriodData period_data(rlp); + const auto kRlpSize = period_data.pillar_votes_.has_value() ? kBaseRlpItemCount + 1 : kBaseRlpItemCount; + dev::RLPStream s(kRlpSize); + s.appendRaw(period_data.pbft_blk->rlp(true)); + + if (period_data.pbft_blk->getPeriod() > 1) [[likely]] { + s.appendRaw(encodePbftVotesBundleRlp(period_data.previous_block_cert_votes)); + } else { + s.append(""); + } + + s.appendList(period_data.dag_blocks.size()); + for (auto const& b : period_data.dag_blocks) { + s.appendRaw(b->rlp(true)); + } + + s.appendList(period_data.transactions.size()); + for (auto const& t : period_data.transactions) { + s.appendRaw(t->rlp()); + } + + // Pillar votes are optional data of period data since ficus hardfork + if (period_data.pillar_votes_.has_value()) { + s.appendRaw(encodePillarVotesBundleRlp(*period_data.pillar_votes_)); + } + + return s.invalidate(); +} + +void PeriodData::rlp(::taraxa::util::RLPDecoderRef encoding) { *this = PeriodData(encoding.value); } + +void PeriodData::rlp(::taraxa::util::RLPEncoderRef encoding) const { encoding.appendRaw(rlp()); } + std::ostream& operator<<(std::ostream& strm, PeriodData const& b) { strm << "[PeriodData] : " << b.pbft_blk << " , num of votes " << b.previous_block_cert_votes.size() << std::endl; return strm; diff --git a/libraries/types/transaction/include/transaction/system_transaction.hpp b/libraries/types/transaction/include/transaction/system_transaction.hpp index e254abe678..d3c8f33c24 100644 --- a/libraries/types/transaction/include/transaction/system_transaction.hpp +++ b/libraries/types/transaction/include/transaction/system_transaction.hpp @@ -1,6 +1,5 @@ #pragma once -#include "common/constants.hpp" #include "transaction/transaction.hpp" namespace taraxa { diff --git a/libraries/types/transaction/include/transaction/transaction.hpp b/libraries/types/transaction/include/transaction/transaction.hpp index abc7eec047..04c8477e48 100644 --- 
a/libraries/types/transaction/include/transaction/transaction.hpp +++ b/libraries/types/transaction/include/transaction/transaction.hpp @@ -1,10 +1,10 @@ #pragma once -#include +#include #include #include -#include "common/default_construct_copyable_movable.hpp" +#include "common/encoding_rlp.hpp" #include "common/types.hpp" namespace taraxa { @@ -55,6 +55,7 @@ struct Transaction { const secret_t &sk, const std::optional &receiver = std::nullopt, uint64_t chain_id = 0); explicit Transaction(const dev::RLP &_rlp, bool verify_strict = false, const h256 &hash = {}); explicit Transaction(const bytes &_rlp, bool verify_strict = false, const h256 &hash = {}); + virtual ~Transaction() = default; auto isZero() const { return is_zero_; } const trx_hash_t &getHash() const; @@ -75,6 +76,8 @@ struct Transaction { const bytes &rlp() const; Json::Value toJSON() const; + + HAS_RLP_FIELDS }; using SharedTransaction = std::shared_ptr; diff --git a/libraries/types/transaction/src/system_transaction.cpp b/libraries/types/transaction/src/system_transaction.cpp index 378fd324b8..5924c799cf 100644 --- a/libraries/types/transaction/src/system_transaction.cpp +++ b/libraries/types/transaction/src/system_transaction.cpp @@ -40,7 +40,7 @@ SystemTransaction::SystemTransaction(const dev::RLP &_rlp, bool verify_strict, c const addr_t &SystemTransaction::getSender() const { return sender_; } -void SystemTransaction::streamRLP(dev::RLPStream &s, bool for_signature) const { +void SystemTransaction::streamRLP(dev::RLPStream &s, bool) const { // always serialize as for the signature s.appendList(9); s << nonce_ << gas_price_ << gas_; diff --git a/libraries/types/transaction/src/transaction.cpp b/libraries/types/transaction/src/transaction.cpp index 083b2a5de9..df928fbf3d 100644 --- a/libraries/types/transaction/src/transaction.cpp +++ b/libraries/types/transaction/src/transaction.cpp @@ -174,4 +174,8 @@ Json::Value Transaction::toJSON() const { return res; } +void 
Transaction::rlp(::taraxa::util::RLPDecoderRef encoding) { fromRLP(encoding.value, false, {}); } + +void Transaction::rlp(::taraxa::util::RLPEncoderRef encoding) const { encoding.appendRaw(rlp()); } + } // namespace taraxa diff --git a/libraries/types/vote/include/vote/pbft_vote.hpp b/libraries/types/vote/include/vote/pbft_vote.hpp index de2e3edd86..f8225eeb5d 100644 --- a/libraries/types/vote/include/vote/pbft_vote.hpp +++ b/libraries/types/vote/include/vote/pbft_vote.hpp @@ -1,5 +1,8 @@ #pragma once +#include + +#include "common/encoding_rlp.hpp" #include "common/vrf_wrapper.hpp" #include "vote.hpp" #include "vrf_sortition.hpp" @@ -87,7 +90,7 @@ class PbftVote : public Vote { bytes rlp(bool inc_sig = true, bool inc_weight = false) const; /** - * @brief Optimed Recursive Length Prefix + * @brief Optimized Recursive Length Prefix * @note Encode only vote's signature and vrf proof into the rlp * * @return bytes of RLP stream @@ -116,7 +119,7 @@ class PbftVote : public Vote { strm << " vote_signature: " << vote.vote_signature_ << std::endl; strm << " blockhash: " << vote.block_hash_ << std::endl; if (vote.weight_) strm << " weight: " << vote.weight_.value() << std::endl; - strm << " vrf_sorition: " << vote.vrf_sortition_ << std::endl; + strm << " vrf_sortition: " << vote.vrf_sortition_ << std::endl; return strm; } @@ -126,6 +129,8 @@ class PbftVote : public Vote { */ Json::Value toJSON() const; + HAS_RLP_FIELDS + private: /** * @brief Secure Hash Algorithm 3 diff --git a/libraries/types/vote/include/vote/vote.hpp b/libraries/types/vote/include/vote/vote.hpp index 5277c5712b..a51a07990f 100644 --- a/libraries/types/vote/include/vote/vote.hpp +++ b/libraries/types/vote/include/vote/vote.hpp @@ -16,6 +16,7 @@ class Vote { public: Vote() = default; Vote(const blk_hash_t& block_hash); + virtual ~Vote() = default; /** * @brief Sign the vote diff --git a/libraries/types/vote/include/vote/votes_bundle_rlp.hpp b/libraries/types/vote/include/vote/votes_bundle_rlp.hpp index 
8a1a1072cc..2975dc455f 100644 --- a/libraries/types/vote/include/vote/votes_bundle_rlp.hpp +++ b/libraries/types/vote/include/vote/votes_bundle_rlp.hpp @@ -5,6 +5,8 @@ #include +#include "common/encoding_rlp.hpp" + namespace taraxa { class PbftVote; @@ -14,6 +16,7 @@ class PillarVote; * @{ */ +// TODO[2865]: move to cpp file constexpr static size_t kPbftVotesBundleRlpSize{5}; /** @@ -32,6 +35,12 @@ dev::bytes encodePbftVotesBundleRlp(const std::vector> */ std::vector> decodePbftVotesBundleRlp(const dev::RLP& votes_bundle_rlp); +struct OptimizedPbftVotesBundle { + std::vector> votes; + + HAS_RLP_FIELDS +}; + constexpr static size_t kPillarVotesBundleRlpSize{3}; /** @@ -50,6 +59,12 @@ dev::bytes encodePillarVotesBundleRlp(const std::vector> decodePillarVotesBundleRlp(const dev::RLP& votes_bundle_rlp); +struct OptimizedPillarVotesBundle { + std::vector> pillar_votes; + + HAS_RLP_FIELDS +}; + /** @}*/ } // namespace taraxa diff --git a/libraries/types/vote/include/vote/vrf_sortition.hpp b/libraries/types/vote/include/vote/vrf_sortition.hpp index 4019308cf1..2137f769bd 100644 --- a/libraries/types/vote/include/vote/vrf_sortition.hpp +++ b/libraries/types/vote/include/vote/vrf_sortition.hpp @@ -1,3 +1,4 @@ +#pragma once #include #include diff --git a/libraries/types/vote/src/pbft_vote.cpp b/libraries/types/vote/src/pbft_vote.cpp index 3884f3fbe4..e3ce1c9c76 100644 --- a/libraries/types/vote/src/pbft_vote.cpp +++ b/libraries/types/vote/src/pbft_vote.cpp @@ -1,8 +1,17 @@ #include "vote/pbft_vote.hpp" +#include #include +#include +#include + +#include +#include +#include #include "common/encoding_rlp.hpp" +#include "vote/vote.hpp" +#include "vote/vrf_sortition.hpp" namespace taraxa { @@ -44,6 +53,7 @@ PbftVote::PbftVote(secret_t const& node_sk, VrfPbftSortition vrf_sortition, blk_ bool PbftVote::operator==(const PbftVote& other) const { return rlp() == other.rlp(); } +// TODO: rename to something else bytes PbftVote::rlp(bool inc_sig, bool inc_weight) const { dev::RLPStream 
s; uint32_t number_of_items = 2; @@ -115,4 +125,8 @@ PbftStep PbftVote::getStep() const { return vrf_sortition_.pbft_msg_.step_; } vote_hash_t PbftVote::sha3(bool inc_sig) const { return dev::sha3(rlp(inc_sig)); } +void PbftVote::rlp(::taraxa::util::RLPDecoderRef encoding) { *this = PbftVote(encoding.value); } + +void PbftVote::rlp(::taraxa::util::RLPEncoderRef encoding) const { encoding.appendRaw(rlp()); } + } // namespace taraxa \ No newline at end of file diff --git a/libraries/types/vote/src/vote.cpp b/libraries/types/vote/src/vote.cpp index 8df1b12765..bfabf6b079 100644 --- a/libraries/types/vote/src/vote.cpp +++ b/libraries/types/vote/src/vote.cpp @@ -2,8 +2,6 @@ #include -#include "common/encoding_rlp.hpp" - namespace taraxa { Vote::Vote(const blk_hash_t& block_hash) : block_hash_(block_hash) {} @@ -35,7 +33,7 @@ const blk_hash_t& Vote::getBlockHash() const { return block_hash_; } bool Vote::verifyVote() const { auto pk = getVoter(); - return !pk.isZero(); // recoverd public key means that it was verified + return !pk.isZero(); // recovered public key means that it was verified } } // namespace taraxa \ No newline at end of file diff --git a/libraries/types/vote/src/votes_bundle_rlp.cpp b/libraries/types/vote/src/votes_bundle_rlp.cpp index d557dace94..5350459636 100644 --- a/libraries/types/vote/src/votes_bundle_rlp.cpp +++ b/libraries/types/vote/src/votes_bundle_rlp.cpp @@ -50,6 +50,13 @@ std::vector> decodePbftVotesBundleRlp(const dev::RLP& return votes; } +void OptimizedPbftVotesBundle::rlp(::taraxa::util::RLPDecoderRef encoding) { + votes = decodePbftVotesBundleRlp(encoding.value); +} +void OptimizedPbftVotesBundle::rlp(::taraxa::util::RLPEncoderRef encoding) const { + encoding.appendRaw(encodePbftVotesBundleRlp(votes)); +} + dev::bytes encodePillarVotesBundleRlp(const std::vector>& votes) { if (votes.empty()) { assert(false); @@ -89,4 +96,11 @@ std::vector> decodePillarVotesBundleRlp(const dev::R return votes; } +void 
OptimizedPillarVotesBundle::rlp(::taraxa::util::RLPDecoderRef encoding) { + pillar_votes = decodePillarVotesBundleRlp(encoding.value); +} +void OptimizedPillarVotesBundle::rlp(::taraxa::util::RLPEncoderRef encoding) const { + encoding.appendRaw(encodePillarVotesBundleRlp(pillar_votes)); +} + } // namespace taraxa \ No newline at end of file diff --git a/libraries/vdf/include/vdf/config.hpp b/libraries/vdf/include/vdf/config.hpp index b4fee1a8b8..e68f02acbd 100644 --- a/libraries/vdf/include/vdf/config.hpp +++ b/libraries/vdf/include/vdf/config.hpp @@ -3,7 +3,6 @@ #include #include "common/constants.hpp" -#include "common/encoding_rlp.hpp" namespace taraxa { diff --git a/libraries/vdf/include/vdf/sortition.hpp b/libraries/vdf/include/vdf/sortition.hpp index dfe51942e8..d24822cd09 100644 --- a/libraries/vdf/include/vdf/sortition.hpp +++ b/libraries/vdf/include/vdf/sortition.hpp @@ -1,18 +1,12 @@ #pragma once -#include - -#include "ProverWesolowski.h" #include "common/types.hpp" #include "common/vrf_wrapper.hpp" #include "libdevcore/CommonData.h" -#include "logger/logger.hpp" -#include "openssl/bn.h" #include "vdf/config.hpp" namespace taraxa::vdf_sortition { -using namespace vdf; using namespace vrf_wrapper; // It includes a vrf for difficulty adjustment diff --git a/libraries/vdf/src/config.cpp b/libraries/vdf/src/config.cpp index 5a01d4a0d1..f56c543c93 100644 --- a/libraries/vdf/src/config.cpp +++ b/libraries/vdf/src/config.cpp @@ -2,6 +2,8 @@ #include +#include "libdevcore/RLP.h" + namespace taraxa { int32_t fixFromOverflow(uint16_t value, int32_t change, uint16_t limit) { diff --git a/libraries/vdf/src/sortition.cpp b/libraries/vdf/src/sortition.cpp index c9ba151bfb..6248e3d901 100644 --- a/libraries/vdf/src/sortition.cpp +++ b/libraries/vdf/src/sortition.cpp @@ -3,9 +3,11 @@ #include #include -#include - +#include "ProverWesolowski.h" +#include "common/encoding_rlp.hpp" +#include "common/util.hpp" namespace taraxa::vdf_sortition { +using namespace vdf; 
VdfSortition::VdfSortition(const SortitionParams& config, const vrf_sk_t& sk, const bytes& vrf_input, uint64_t vote_count, uint64_t total_vote_count) diff --git a/programs/taraxa-bootnode/main.cpp b/programs/taraxa-bootnode/main.cpp index 7d5b98684c..fc3680d8e0 100644 --- a/programs/taraxa-bootnode/main.cpp +++ b/programs/taraxa-bootnode/main.cpp @@ -12,12 +12,12 @@ #include #include #include -#include #include "cli/config.hpp" #include "cli/tools.hpp" #include "common/jsoncpp.hpp" #include "common/thread_pool.hpp" +#include "common/util.hpp" #include "config/version.hpp" namespace po = boost::program_options; diff --git a/programs/taraxad/main.cpp b/programs/taraxad/main.cpp index 76eb64ca7f..17aa61336c 100644 --- a/programs/taraxad/main.cpp +++ b/programs/taraxad/main.cpp @@ -2,18 +2,23 @@ #include #include "cli/config.hpp" -#include "common/static_init.hpp" +#include "cli/tools.hpp" +#include "common/config_exception.hpp" +#include "common/init.hpp" #include "node/node.hpp" using namespace taraxa; -namespace bpo = boost::program_options; - int main(int argc, const char* argv[]) { static_init(); + + if (!checkDiskSpace(cli::tools::getTaraxaDefaultConfigFile(), 10)) { + std::cerr << "Insufficient disk space" << std::endl; + return 1; + } + try { cli::Config cli_conf(argc, argv); - if (cli_conf.nodeConfigured()) { auto node = std::make_shared(cli_conf.getNodeConfiguration()); node->start(); diff --git a/submodules/CMakeLists.txt b/submodules/CMakeLists.txt index 117cbcd879..ad5b9bced0 100644 --- a/submodules/CMakeLists.txt +++ b/submodules/CMakeLists.txt @@ -84,11 +84,15 @@ add_make_target(vrf libsodium.a "${VRF_AUTOTOOLS_CMD} && ${CMAKE_MAKE_PROGRAM} & # Add taraxa-evm set(EVM_BUILD_DIR ${BUILD_DIR_PREFIX}/taraxa-evm) ## add include of libs -set(EVM_BUILD_INCLUDE -I${rocksdb_SOURCE_DIR}/include) +set(EVM_BUILD_INCLUDE -I${CONAN_INCLUDE_DIRS_ROCKSDB}) ## set C flags set(EVM_BUILD_CGO_CFLAGS -O3 ${EVM_BUILD_INCLUDE}) ## add link of libs -set(EVM_BUILD_LD 
-L${CMAKE_BINARY_DIR}/lib -lrocksdb -L${CONAN_LIB_DIRS_LZ4} -llz4) +set(EVM_BUILD_LD -L${CONAN_LIB_DIRS_ROCKSDB} -lrocksdb -L${CONAN_LIB_DIRS_LZ4} -llz4) +## add path to homebrew installed libs on macos +if (APPLE) + set(EVM_BUILD_LD ${EVM_BUILD_LD} -L/opt/homebrew/lib) +endif() ## if we need full static build use flag if(TARAXA_STATIC_BUILD) if (NOT APPLE) @@ -113,7 +117,7 @@ set(EVM_AFTER_BUILD_COMMAND ${EVM_AFTER_BUILD_COMMAND} && mv ${EVM_BUILD_DIR}/li ## final command set(EVM_LIBRARY_COMMAND ${EVM_BUILD_COMMAND} && ${EVM_AFTER_BUILD_COMMAND}) -file(GLOB_RECURSE TARAXA_EVM_SOURCES "taraxa-evm/*.go" ) +file(GLOB_RECURSE TARAXA_EVM_SOURCES CONFIGURE_DEPENDS "taraxa-evm/*.go" ) list(APPEND TARAXA_EVM_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/taraxa-evm/taraxa/C/common.h ${CMAKE_CURRENT_SOURCE_DIR}/taraxa-evm/taraxa/C/state.h) @@ -126,7 +130,7 @@ add_custom_command( COMMENT "Building taraxa-evm library") add_custom_target(taraxa_evm_build DEPENDS ${EVM_BUILD_DIR}/lib/${EVM_LIBRARY_NAME}) -add_dependencies(taraxa_evm_build rocksdb) +add_dependencies(taraxa_evm_build CONAN_PKG::rocksdb) add_library(taraxa-evm INTERFACE) add_dependencies(taraxa-evm taraxa_evm_build) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 2edc2f91df..bbaa08f3cb 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 2edc2f91df972511e1ccba6440a38efd32812ee2 +Subproject commit bbaa08f3cb99e7256c7a6b5c6c6d8cc2c8d4d930 diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 78b0e2aa21..44f7e183c0 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -29,7 +29,7 @@ target_link_libraries(dag_test test_util) add_test(dag_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/dag_test) add_executable(final_chain_test final_chain_test.cpp) -target_link_libraries(final_chain_test test_util vote) +target_link_libraries(final_chain_test test_util) add_test(final_chain_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/final_chain_test) add_executable(pillar_chain_test 
pillar_chain_test.cpp) @@ -37,7 +37,7 @@ target_link_libraries(pillar_chain_test test_util) add_test(pillar_chain_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/pillar_chain_test) add_executable(full_node_test full_node_test.cpp) -target_link_libraries(full_node_test test_util vote) +target_link_libraries(full_node_test test_util) add_test(full_node_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/full_node_test) add_executable(network_test network_test.cpp) diff --git a/tests/abi_test.cpp b/tests/abi_test.cpp index b8ad6a9f63..4daf6322cd 100644 --- a/tests/abi_test.cpp +++ b/tests/abi_test.cpp @@ -1,6 +1,7 @@ #include #include "common/encoding_solidity.hpp" +#include "logger/logger.hpp" #include "test_util/gtest.hpp" namespace taraxa::core_tests { diff --git a/tests/crypto_test.cpp b/tests/crypto_test.cpp index ff820d9062..b1c5888766 100644 --- a/tests/crypto_test.cpp +++ b/tests/crypto_test.cpp @@ -8,13 +8,12 @@ #include #include -#include "common/static_init.hpp" +#include "common/init.hpp" #include "common/vrf_wrapper.hpp" -#include "config/config.hpp" #include "logger/logger.hpp" #include "test_util/gtest.hpp" #include "vdf/sortition.hpp" -#include "vote/pbft_vote.hpp" +#include "vote/vrf_sortition.hpp" namespace taraxa::core_tests { using namespace vdf; @@ -66,7 +65,7 @@ TEST_F(CryptoTest, vrf_proof_verify) { EXPECT_TRUE(isValidVrfPublicKey(pk)); EXPECT_TRUE(isValidVrfPublicKey(pk2)); - auto msg = getRlpBytes("helloworld!"); + auto msg = getRlpBytes("hello world!"); auto proof = getVrfProof(sk, msg); EXPECT_TRUE(proof); auto output = getVrfOutput(pk, proof.value(), msg); @@ -312,7 +311,7 @@ TEST_F(CryptoTest, DISABLED_compute_vdf_solution_cost_time) { "0b6627a6680e01cea3d9f36fa797f7f34e8869c3a526d9ed63ed8170e35542aad05dc12c" "1df1edc9f3367fba550b7971fc2de6c5998d8784051c5be69abc9644"); level_t level = 1; - uint16_t threshold_upper = 0; // diffculty == diffuclty_stale + uint16_t threshold_upper = 0; // difficulty == difficulty_stale uint16_t difficulty_min = 0; uint16_t 
difficulty_max = 0; uint16_t lambda_bound = 100; diff --git a/tests/dag_block_test.cpp b/tests/dag_block_test.cpp index 7e0f639028..c01c5cc6e8 100644 --- a/tests/dag_block_test.cpp +++ b/tests/dag_block_test.cpp @@ -5,7 +5,7 @@ #include #include -#include "common/static_init.hpp" +#include "common/init.hpp" #include "common/types.hpp" #include "common/util.hpp" #include "dag/dag.hpp" @@ -18,16 +18,11 @@ #include "vdf/sortition.hpp" namespace taraxa::core_tests { -const unsigned NUM_BLK = 4; -const unsigned BLK_TRX_LEN = 4; -const unsigned BLK_TRX_OVERLAP = 1; using namespace vdf_sortition; struct DagBlockTest : NodesTest {}; struct DagBlockMgrTest : NodesTest {}; -auto g_blk_samples = samples::createMockDagBlkSamples(0, NUM_BLK, 0, BLK_TRX_LEN, BLK_TRX_OVERLAP); - auto g_secret = dev::Secret("3800b2875669d9b2053c1aff9224ecfdc411423aac5b5a73d7a45ced1c3b9dcd", dev::Secret::ConstructFromStringType::FromHex); auto g_key_pair = dev::KeyPair(g_secret); @@ -230,14 +225,16 @@ TEST_F(DagBlockMgrTest, incorrect_tx_estimation) { // transactions.size and estimations size is not equal { - DagBlock blk(dag_genesis, propose_level, {}, {trx->getHash()}, {}, vdf1, node->getSecretKey()); + auto blk = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, vec_trx_t{trx->getHash()}, 0, vdf1, + node->getSecretKey()); EXPECT_EQ(node->getDagManager()->verifyBlock(std::move(blk)).first, DagManager::VerifyBlockReturnType::IncorrectTransactionsEstimation); } // wrong estimated tx { - DagBlock blk(dag_genesis, propose_level, {}, {trx->getHash()}, 100, vdf1, node->getSecretKey()); + auto blk = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, vec_trx_t{trx->getHash()}, 100, vdf1, + node->getSecretKey()); EXPECT_EQ(node->getDagManager()->verifyBlock(std::move(blk)).first, DagManager::VerifyBlockReturnType::IncorrectTransactionsEstimation); } @@ -265,8 +262,9 @@ TEST_F(DagBlockMgrTest, dag_block_tips_verification) { dev::bytes vdf_msg = DagManager::getVdfMessage(dag_genesis, {trx}); 
vdf.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk(dag_genesis, propose_level, {}, {trx->getHash()}, 100000, vdf, node->getSecretKey()); - dag_blocks_hashes.push_back(blk.getHash()); + auto blk = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, vec_trx_t{trx->getHash()}, 100000, + vdf, node->getSecretKey()); + dag_blocks_hashes.push_back(blk->getHash()); EXPECT_EQ(node->getDagManager()->verifyBlock(blk).first, DagManager::VerifyBlockReturnType::Verified); EXPECT_TRUE(node->getDagManager()->addDagBlock(std::move(blk), {trx}).first); } @@ -275,21 +273,21 @@ TEST_F(DagBlockMgrTest, dag_block_tips_verification) { vdf.computeVdfSolution(vdf_config, vdf_msg, false); // Verify block over the kDagBlockMaxTips is rejected - DagBlock blk_over_limit(dag_genesis, propose_level, dag_blocks_hashes, {trxs[0]->getHash()}, 100000, vdf, - node->getSecretKey()); + auto blk_over_limit = std::make_shared(dag_genesis, propose_level, dag_blocks_hashes, + vec_trx_t{trxs[0]->getHash()}, 100000, vdf, node->getSecretKey()); EXPECT_EQ(node->getDagManager()->verifyBlock(blk_over_limit).first, DagManager::VerifyBlockReturnType::FailedTipsVerification); // Verify block at kDagBlockMaxTips is accepted dag_blocks_hashes.resize(kDagBlockMaxTips); - DagBlock blk_at_limit(dag_genesis, propose_level, dag_blocks_hashes, {trxs[0]->getHash()}, 100000, vdf, - node->getSecretKey()); + auto blk_at_limit = std::make_shared(dag_genesis, propose_level, dag_blocks_hashes, + vec_trx_t{trxs[0]->getHash()}, 100000, vdf, node->getSecretKey()); EXPECT_EQ(node->getDagManager()->verifyBlock(blk_at_limit).first, DagManager::VerifyBlockReturnType::Verified); // Verify block below kDagBlockMaxTips is accepted dag_blocks_hashes.resize(kDagBlockMaxTips - 1); - DagBlock blk_under_limit(dag_genesis, propose_level, dag_blocks_hashes, {trxs[0]->getHash()}, 100000, vdf, - node->getSecretKey()); + auto blk_under_limit = std::make_shared(dag_genesis, propose_level, dag_blocks_hashes, + 
vec_trx_t{trxs[0]->getHash()}, 100000, vdf, node->getSecretKey()); EXPECT_EQ(node->getDagManager()->verifyBlock(blk_under_limit).first, DagManager::VerifyBlockReturnType::Verified); auto dag_blocks_hashes_with_duplicate_pivot = dag_blocks_hashes; @@ -299,14 +297,16 @@ TEST_F(DagBlockMgrTest, dag_block_tips_verification) { dag_blocks_hashes_with_duplicate_tip.push_back(dag_blocks_hashes[0]); // Verify block with duplicate pivot is rejected - DagBlock blk_with_duplicate_pivot(dag_genesis, propose_level, dag_blocks_hashes_with_duplicate_pivot, - {trxs[0]->getHash()}, 100000, vdf, node->getSecretKey()); + auto blk_with_duplicate_pivot = + std::make_shared(dag_genesis, propose_level, dag_blocks_hashes_with_duplicate_pivot, + vec_trx_t{trxs[0]->getHash()}, 100000, vdf, node->getSecretKey()); EXPECT_EQ(node->getDagManager()->verifyBlock(blk_with_duplicate_pivot).first, DagManager::VerifyBlockReturnType::FailedTipsVerification); // Verify block with duplicate tip is rejected - DagBlock blk_with_duplicate_tip(dag_genesis, propose_level, dag_blocks_hashes_with_duplicate_tip, - {trxs[0]->getHash()}, 100000, vdf, node->getSecretKey()); + auto blk_with_duplicate_tip = + std::make_shared(dag_genesis, propose_level, dag_blocks_hashes_with_duplicate_tip, + vec_trx_t{trxs[0]->getHash()}, 100000, vdf, node->getSecretKey()); EXPECT_EQ(node->getDagManager()->verifyBlock(blk_with_duplicate_tip).first, DagManager::VerifyBlockReturnType::FailedTipsVerification); } @@ -334,8 +334,9 @@ TEST_F(DagBlockMgrTest, dag_block_tips_proposal) { dev::bytes vdf_msg = DagManager::getVdfMessage(dag_genesis, {trx}); vdf.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk(dag_genesis, propose_level, {}, {trx->getHash()}, dag_block_gas, vdf, node->getSecretKey()); - dag_blocks_hashes.push_back(blk.getHash()); + auto blk = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, vec_trx_t{trx->getHash()}, + dag_block_gas, vdf, node->getSecretKey()); + 
dag_blocks_hashes.push_back(blk->getHash()); EXPECT_EQ(node->getDagManager()->verifyBlock(blk).first, DagManager::VerifyBlockReturnType::Verified); EXPECT_TRUE(node->getDagManager()->addDagBlock(std::move(blk), {trx}).first); } @@ -366,8 +367,9 @@ TEST_F(DagBlockMgrTest, dag_block_tips_proposal) { dev::bytes vdf_msg = DagManager::getVdfMessage(dag_blocks_hashes[0], {trx}); vdf.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk(dag_blocks_hashes[0], propose_level, {}, {trx->getHash()}, 100000, vdf, node->getSecretKey()); - dag_blocks_hashes.push_back(blk.getHash()); + auto blk = std::make_shared(dag_blocks_hashes[0], propose_level, vec_blk_t{}, vec_trx_t{trx->getHash()}, + 100000, vdf, node->getSecretKey()); + dag_blocks_hashes.push_back(blk->getHash()); EXPECT_EQ(node->getDagManager()->verifyBlock(blk).first, DagManager::VerifyBlockReturnType::Verified); EXPECT_TRUE(node->getDagManager()->addDagBlock(std::move(blk), {trx}).first); } @@ -387,8 +389,9 @@ TEST_F(DagBlockMgrTest, dag_block_tips_proposal) { dev::bytes vdf_msg = DagManager::getVdfMessage(dag_genesis, {trxs[0]}); vdf.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk(dag_genesis, propose_level, {}, {trxs[0]->getHash()}, 100000, vdf, node_cfgs[1].node_secret); - dag_blocks_hashes.push_back(blk.getHash()); + auto blk = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, vec_trx_t{trxs[0]->getHash()}, 100000, + vdf, node_cfgs[1].node_secret); + dag_blocks_hashes.push_back(blk->getHash()); EXPECT_TRUE(node->getDagManager()->addDagBlock(std::move(blk), {trxs[0]}).first); selected_tips = node->getDagBlockProposer()->selectDagBlockTips(dag_blocks_hashes, selection_gas_limit); @@ -406,6 +409,7 @@ TEST_F(DagBlockMgrTest, too_big_dag_block) { // make config auto node_cfgs = make_node_cfgs(1, 1, 20); node_cfgs.front().genesis.dag.gas_limit = 500000; + node_cfgs.front().propose_dag_gas_limit = 500000; auto node = create_nodes(node_cfgs).front(); auto db = node->getDB(); @@ -434,7 +438,8 
@@ TEST_F(DagBlockMgrTest, too_big_dag_block) { vdf1.computeVdfSolution(vdf_config, vdf_msg, false); { - DagBlock blk(dag_genesis, propose_level, {}, hashes, estimations, vdf1, node->getSecretKey()); + auto blk = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, hashes, estimations, vdf1, + node->getSecretKey()); EXPECT_EQ(node->getDagManager()->verifyBlock(std::move(blk)).first, DagManager::VerifyBlockReturnType::BlockTooBig); } } diff --git a/tests/dag_test.cpp b/tests/dag_test.cpp index 9a74fad8dc..97a4cfe7db 100644 --- a/tests/dag_test.cpp +++ b/tests/dag_test.cpp @@ -1,6 +1,6 @@ #include -#include "common/static_init.hpp" +#include "common/init.hpp" #include "common/types.hpp" #include "dag/dag_manager.hpp" #include "logger/logger.hpp" @@ -135,27 +135,37 @@ TEST_F(DagTest, compute_epoch) { auto trx_mgr = std::make_shared(FullNodeConfig(), db_ptr, nullptr, addr_t()); auto pbft_chain = std::make_shared(addr_t(), db_ptr); const blk_hash_t GENESIS = node_cfgs[0].genesis.dag_genesis_block.getHash(); - auto mgr = std::make_shared(node_cfgs[0].genesis.dag_genesis_block, addr_t(), - node_cfgs[0].genesis.sortition, node_cfgs[0].genesis.dag, trx_mgr, pbft_chain, - nullptr, db_ptr, nullptr, 100000, node_cfgs[0].genesis.state); - - DagBlock blkA(GENESIS, 1, {}, {trx_hash_t(2)}, sig_t(1), blk_hash_t(2), addr_t(1)); - DagBlock blkB(GENESIS, 1, {}, {trx_hash_t(3), trx_hash_t(4)}, sig_t(1), blk_hash_t(3), addr_t(1)); - DagBlock blkC(blk_hash_t(2), 2, {blk_hash_t(3)}, {}, sig_t(1), blk_hash_t(4), addr_t(1)); - DagBlock blkD(blk_hash_t(2), 2, {}, {}, sig_t(1), blk_hash_t(5), addr_t(1)); - DagBlock blkE(blk_hash_t(4), 3, {blk_hash_t(5), blk_hash_t(7)}, {}, sig_t(1), blk_hash_t(6), addr_t(1)); - DagBlock blkF(blk_hash_t(3), 2, {}, {}, sig_t(1), blk_hash_t(7), addr_t(1)); - DagBlock blkG(blk_hash_t(2), 2, {}, {trx_hash_t(4)}, sig_t(1), blk_hash_t(8), addr_t(1)); - DagBlock blkH(blk_hash_t(6), 5, {blk_hash_t(8), blk_hash_t(10)}, {}, sig_t(1), blk_hash_t(9), addr_t(1)); - 
DagBlock blkI(blk_hash_t(11), 4, {blk_hash_t(4)}, {}, sig_t(1), blk_hash_t(10), addr_t(1)); - DagBlock blkJ(blk_hash_t(7), 3, {}, {}, sig_t(1), blk_hash_t(11), addr_t(1)); - DagBlock blkK(blk_hash_t(9), 6, {}, {}, sig_t(1), blk_hash_t(12), addr_t(1)); - - const auto blkA_hash = blkA.getHash(); - const auto blkC_hash = blkC.getHash(); - const auto blkE_hash = blkE.getHash(); - const auto blkH_hash = blkH.getHash(); - const auto blkK_hash = blkK.getHash(); + node_cfgs[0].genesis.pbft.gas_limit = 100000; + auto mgr = std::make_shared(node_cfgs[0], addr_t(), trx_mgr, pbft_chain, nullptr, db_ptr, nullptr); + + auto blkA = + std::make_shared(GENESIS, 1, vec_blk_t{}, vec_trx_t{trx_hash_t(2)}, sig_t(1), blk_hash_t(2), addr_t(1)); + auto blkB = std::make_shared(GENESIS, 1, vec_blk_t{}, vec_trx_t{trx_hash_t(3), trx_hash_t(4)}, sig_t(1), + blk_hash_t(3), addr_t(1)); + auto blkC = std::make_shared(blk_hash_t(2), 2, vec_blk_t{blk_hash_t(3)}, vec_trx_t{}, sig_t(1), + blk_hash_t(4), addr_t(1)); + auto blkD = + std::make_shared(blk_hash_t(2), 2, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(5), addr_t(1)); + auto blkE = std::make_shared(blk_hash_t(4), 3, vec_blk_t{blk_hash_t(5), blk_hash_t(7)}, vec_trx_t{}, + sig_t(1), blk_hash_t(6), addr_t(1)); + auto blkF = + std::make_shared(blk_hash_t(3), 2, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(7), addr_t(1)); + auto blkG = std::make_shared(blk_hash_t(2), 2, vec_blk_t{}, vec_trx_t{trx_hash_t(4)}, sig_t(1), + blk_hash_t(8), addr_t(1)); + auto blkH = std::make_shared(blk_hash_t(6), 5, vec_blk_t{blk_hash_t(8), blk_hash_t(10)}, vec_trx_t{}, + sig_t(1), blk_hash_t(9), addr_t(1)); + auto blkI = std::make_shared(blk_hash_t(11), 4, vec_blk_t{blk_hash_t(4)}, vec_trx_t{}, sig_t(1), + blk_hash_t(10), addr_t(1)); + auto blkJ = + std::make_shared(blk_hash_t(7), 3, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(11), addr_t(1)); + auto blkK = + std::make_shared(blk_hash_t(9), 6, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(12), addr_t(1)); + + 
const auto blkA_hash = blkA->getHash(); + const auto blkC_hash = blkC->getHash(); + const auto blkE_hash = blkE->getHash(); + const auto blkH_hash = blkH->getHash(); + const auto blkK_hash = blkK->getHash(); EXPECT_TRUE(mgr->addDagBlock(std::move(blkA)).first); EXPECT_TRUE(mgr->addDagBlock(std::move(blkB)).first); @@ -228,23 +238,35 @@ TEST_F(DagTest, dag_expiry) { auto trx_mgr = std::make_shared(FullNodeConfig(), db_ptr, nullptr, addr_t()); auto pbft_chain = std::make_shared(addr_t(), db_ptr); const blk_hash_t GENESIS = node_cfgs[0].genesis.dag_genesis_block.getHash(); - auto mgr = std::make_shared( - node_cfgs[0].genesis.dag_genesis_block, addr_t(), node_cfgs[0].genesis.sortition, node_cfgs[0].genesis.dag, - trx_mgr, pbft_chain, nullptr, db_ptr, nullptr, 100000, node_cfgs[0].genesis.state, false, 0, 3, EXPIRY_LIMIT); - - DagBlock blkA(GENESIS, 1, {}, {trx_hash_t(2)}, sig_t(1), blk_hash_t(2), addr_t(1)); - DagBlock blkB(GENESIS, 1, {}, {trx_hash_t(3), trx_hash_t(4)}, sig_t(1), blk_hash_t(3), addr_t(1)); - DagBlock blkC(blk_hash_t(2), 2, {blk_hash_t(3)}, {}, sig_t(1), blk_hash_t(4), addr_t(1)); - DagBlock blkD(blk_hash_t(2), 2, {}, {}, sig_t(1), blk_hash_t(5), addr_t(1)); - DagBlock blkE(blk_hash_t(4), 3, {blk_hash_t(5), blk_hash_t(7)}, {}, sig_t(1), blk_hash_t(6), addr_t(1)); - DagBlock blkF(blk_hash_t(3), 2, {}, {}, sig_t(1), blk_hash_t(7), addr_t(1)); - DagBlock blkG(blk_hash_t(2), 2, {}, {trx_hash_t(4)}, sig_t(1), blk_hash_t(8), addr_t(1)); - DagBlock blkH(blk_hash_t(6), 5, {blk_hash_t(8), blk_hash_t(10)}, {}, sig_t(1), blk_hash_t(9), addr_t(1)); - DagBlock blkI(blk_hash_t(11), 4, {blk_hash_t(4)}, {}, sig_t(1), blk_hash_t(10), addr_t(1)); - DagBlock blkJ(blk_hash_t(7), 3, {}, {}, sig_t(1), blk_hash_t(11), addr_t(1)); - DagBlock blkK(blk_hash_t(9), 6, {}, {}, sig_t(1), blk_hash_t(12), addr_t(1)); - - const auto blkK_hash = blkK.getHash(); + node_cfgs[0].max_levels_per_period = 3; + node_cfgs[0].dag_expiry_limit = EXPIRY_LIMIT; + 
node_cfgs[0].genesis.pbft.gas_limit = 100000; + auto mgr = std::make_shared(node_cfgs[0], addr_t(), trx_mgr, pbft_chain, nullptr, db_ptr, nullptr); + + auto blkA = + std::make_shared(GENESIS, 1, vec_blk_t{}, vec_trx_t{trx_hash_t(2)}, sig_t(1), blk_hash_t(2), addr_t(1)); + auto blkB = std::make_shared(GENESIS, 1, vec_blk_t{}, vec_trx_t{trx_hash_t(3), trx_hash_t(4)}, sig_t(1), + blk_hash_t(3), addr_t(1)); + auto blkC = std::make_shared(blk_hash_t(2), 2, vec_blk_t{blk_hash_t(3)}, vec_trx_t{}, sig_t(1), + blk_hash_t(4), addr_t(1)); + auto blkD = + std::make_shared(blk_hash_t(2), 2, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(5), addr_t(1)); + auto blkE = std::make_shared(blk_hash_t(4), 3, vec_blk_t{blk_hash_t(5), blk_hash_t(7)}, vec_trx_t{}, + sig_t(1), blk_hash_t(6), addr_t(1)); + auto blkF = + std::make_shared(blk_hash_t(3), 2, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(7), addr_t(1)); + auto blkG = std::make_shared(blk_hash_t(2), 2, vec_blk_t{}, vec_trx_t{trx_hash_t(4)}, sig_t(1), + blk_hash_t(8), addr_t(1)); + auto blkH = std::make_shared(blk_hash_t(6), 5, vec_blk_t{blk_hash_t(8), blk_hash_t(10)}, vec_trx_t{}, + sig_t(1), blk_hash_t(9), addr_t(1)); + auto blkI = std::make_shared(blk_hash_t(11), 4, vec_blk_t{blk_hash_t(4)}, vec_trx_t{}, sig_t(1), + blk_hash_t(10), addr_t(1)); + auto blkJ = + std::make_shared(blk_hash_t(7), 3, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(11), addr_t(1)); + auto blkK = + std::make_shared(blk_hash_t(9), 6, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(12), addr_t(1)); + + const auto blkK_hash = blkK->getHash(); mgr->addDagBlock(std::move(blkA)); mgr->addDagBlock(std::move(blkB)); @@ -270,33 +292,35 @@ TEST_F(DagTest, dag_expiry) { mgr->setDagBlockOrder(blkK_hash, 1, orders); // Verify expiry level - EXPECT_EQ(mgr->getDagExpiryLevel(), blkK.getLevel() - EXPIRY_LIMIT); + EXPECT_EQ(mgr->getDagExpiryLevel(), blkK->getLevel() - EXPIRY_LIMIT); - DagBlock blk_under_limit(blk_hash_t(2), blkK.getLevel() - EXPIRY_LIMIT - 1, {}, {}, 
sig_t(1), blk_hash_t(13), - addr_t(1)); - DagBlock blk_at_limit(blk_hash_t(4), blkK.getLevel() - EXPIRY_LIMIT, {}, {}, sig_t(1), blk_hash_t(14), addr_t(1)); - DagBlock blk_over_limit(blk_hash_t(11), blkK.getLevel() - EXPIRY_LIMIT + 1, {}, {}, sig_t(1), blk_hash_t(15), - addr_t(1)); + auto blk_under_limit = std::make_shared(blk_hash_t(2), blkK->getLevel() - EXPIRY_LIMIT - 1, vec_blk_t{}, + vec_trx_t{}, sig_t(1), blk_hash_t(13), addr_t(1)); + auto blk_at_limit = std::make_shared(blk_hash_t(4), blkK->getLevel() - EXPIRY_LIMIT, vec_blk_t{}, + vec_trx_t{}, sig_t(1), blk_hash_t(14), addr_t(1)); + auto blk_over_limit = std::make_shared(blk_hash_t(11), blkK->getLevel() - EXPIRY_LIMIT + 1, vec_blk_t{}, + vec_trx_t{}, sig_t(1), blk_hash_t(15), addr_t(1)); // Block under limit is not accepted to DAG since it is expired EXPECT_FALSE(mgr->addDagBlock(std::move(blk_under_limit)).first); EXPECT_TRUE(mgr->addDagBlock(std::move(blk_at_limit)).first); EXPECT_TRUE(mgr->addDagBlock(std::move(blk_over_limit)).first); - EXPECT_FALSE(db_ptr->dagBlockInDb(blk_under_limit.getHash())); - EXPECT_TRUE(db_ptr->dagBlockInDb(blk_at_limit.getHash())); - EXPECT_TRUE(db_ptr->dagBlockInDb(blk_over_limit.getHash())); + EXPECT_FALSE(db_ptr->dagBlockInDb(blk_under_limit->getHash())); + EXPECT_TRUE(db_ptr->dagBlockInDb(blk_at_limit->getHash())); + EXPECT_TRUE(db_ptr->dagBlockInDb(blk_over_limit->getHash())); - DagBlock blk_new_anchor(blk_hash_t(12), 7, {}, {}, sig_t(1), blk_hash_t(16), addr_t(1)); + auto blk_new_anchor = + std::make_shared(blk_hash_t(12), 7, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(16), addr_t(1)); EXPECT_TRUE(mgr->addDagBlock(std::move(blk_new_anchor)).first); - orders = mgr->getDagBlockOrder(blk_new_anchor.getHash(), 2); - mgr->setDagBlockOrder(blk_new_anchor.getHash(), 2, orders); + orders = mgr->getDagBlockOrder(blk_new_anchor->getHash(), 2); + mgr->setDagBlockOrder(blk_new_anchor->getHash(), 2, orders); // Verify that the block blk_at_limit which was initially part of the DAG 
became expired once new anchor moved the // limit - EXPECT_FALSE(db_ptr->dagBlockInDb(blk_under_limit.getHash())); - EXPECT_FALSE(db_ptr->dagBlockInDb(blk_at_limit.getHash())); - EXPECT_TRUE(db_ptr->dagBlockInDb(blk_over_limit.getHash())); + EXPECT_FALSE(db_ptr->dagBlockInDb(blk_under_limit->getHash())); + EXPECT_FALSE(db_ptr->dagBlockInDb(blk_at_limit->getHash())); + EXPECT_TRUE(db_ptr->dagBlockInDb(blk_over_limit->getHash())); } TEST_F(DagTest, receive_block_in_order) { @@ -304,13 +328,14 @@ TEST_F(DagTest, receive_block_in_order) { auto pbft_chain = std::make_shared(addr_t(), db_ptr); auto trx_mgr = std::make_shared(FullNodeConfig(), db_ptr, nullptr, addr_t()); const blk_hash_t GENESIS = node_cfgs[0].genesis.dag_genesis_block.getHash(); - auto mgr = std::make_shared(node_cfgs[0].genesis.dag_genesis_block, addr_t(), - node_cfgs[0].genesis.sortition, node_cfgs[0].genesis.dag, trx_mgr, pbft_chain, - nullptr, db_ptr, nullptr, 100000, node_cfgs[0].genesis.state); + node_cfgs[0].genesis.pbft.gas_limit = 100000; + auto mgr = std::make_shared(node_cfgs[0], addr_t(), trx_mgr, pbft_chain, nullptr, db_ptr, nullptr); - DagBlock blk1(GENESIS, 1, {}, {}, sig_t(777), blk_hash_t(1), addr_t(15)); - DagBlock blk2(blk_hash_t(1), 2, {}, {}, sig_t(777), blk_hash_t(2), addr_t(15)); - DagBlock blk3(GENESIS, 3, {blk_hash_t(1), blk_hash_t(2)}, {}, sig_t(777), blk_hash_t(3), addr_t(15)); + auto blk1 = std::make_shared(GENESIS, 1, vec_blk_t{}, vec_trx_t{}, sig_t(777), blk_hash_t(1), addr_t(15)); + auto blk2 = + std::make_shared(blk_hash_t(1), 2, vec_blk_t{}, vec_trx_t{}, sig_t(777), blk_hash_t(2), addr_t(15)); + auto blk3 = std::make_shared(GENESIS, 3, vec_blk_t{blk_hash_t(1), blk_hash_t(2)}, vec_trx_t{}, sig_t(777), + blk_hash_t(3), addr_t(15)); mgr->addDagBlock(std::move(blk1)); mgr->addDagBlock(std::move(blk2)); @@ -336,27 +361,37 @@ TEST_F(DagTest, compute_epoch_2) { auto pbft_chain = std::make_shared(addr_t(), db_ptr); auto trx_mgr = std::make_shared(FullNodeConfig(), db_ptr, 
nullptr, addr_t()); const blk_hash_t GENESIS = node_cfgs[0].genesis.dag_genesis_block.getHash(); - auto mgr = std::make_shared(node_cfgs[0].genesis.dag_genesis_block, addr_t(), - node_cfgs[0].genesis.sortition, node_cfgs[0].genesis.dag, trx_mgr, pbft_chain, - nullptr, db_ptr, nullptr, 100000, node_cfgs[0].genesis.state); - - DagBlock blkA(GENESIS, 1, {}, {trx_hash_t(2)}, sig_t(1), blk_hash_t(2), addr_t(1)); - DagBlock blkB(GENESIS, 1, {}, {trx_hash_t(3), trx_hash_t(4)}, sig_t(1), blk_hash_t(3), addr_t(1)); - DagBlock blkC(blk_hash_t(2), 2, {blk_hash_t(3)}, {}, sig_t(1), blk_hash_t(4), addr_t(1)); - DagBlock blkD(blk_hash_t(2), 2, {}, {}, sig_t(1), blk_hash_t(5), addr_t(1)); - DagBlock blkE(blk_hash_t(4), 3, {blk_hash_t(5), blk_hash_t(7)}, {}, sig_t(1), blk_hash_t(6), addr_t(1)); - DagBlock blkF(blk_hash_t(3), 2, {}, {}, sig_t(1), blk_hash_t(7), addr_t(1)); - DagBlock blkG(blk_hash_t(2), 2, {}, {trx_hash_t(4)}, sig_t(1), blk_hash_t(8), addr_t(1)); - DagBlock blkH(blk_hash_t(6), 5, {blk_hash_t(8), blk_hash_t(10)}, {}, sig_t(1), blk_hash_t(9), addr_t(1)); - DagBlock blkI(blk_hash_t(11), 4, {blk_hash_t(4)}, {}, sig_t(1), blk_hash_t(10), addr_t(1)); - DagBlock blkJ(blk_hash_t(7), 3, {}, {}, sig_t(1), blk_hash_t(11), addr_t(1)); - DagBlock blkK(blk_hash_t(10), 5, {}, {}, sig_t(1), blk_hash_t(12), addr_t(1)); - - const auto blkA_hash = blkA.getHash(); - const auto blkC_hash = blkC.getHash(); - const auto blkE_hash = blkE.getHash(); - const auto blkH_hash = blkH.getHash(); - const auto blkK_hash = blkK.getHash(); + node_cfgs[0].genesis.pbft.gas_limit = 100000; + auto mgr = std::make_shared(node_cfgs[0], addr_t(), trx_mgr, pbft_chain, nullptr, db_ptr, nullptr); + + auto blkA = + std::make_shared(GENESIS, 1, vec_blk_t{}, vec_trx_t{trx_hash_t(2)}, sig_t(1), blk_hash_t(2), addr_t(1)); + auto blkB = std::make_shared(GENESIS, 1, vec_blk_t{}, vec_trx_t{trx_hash_t(3), trx_hash_t(4)}, sig_t(1), + blk_hash_t(3), addr_t(1)); + auto blkC = std::make_shared(blk_hash_t(2), 2, 
vec_blk_t{blk_hash_t(3)}, vec_trx_t{}, sig_t(1), + blk_hash_t(4), addr_t(1)); + auto blkD = + std::make_shared(blk_hash_t(2), 2, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(5), addr_t(1)); + auto blkE = std::make_shared(blk_hash_t(4), 3, vec_blk_t{blk_hash_t(5), blk_hash_t(7)}, vec_trx_t{}, + sig_t(1), blk_hash_t(6), addr_t(1)); + auto blkF = + std::make_shared(blk_hash_t(3), 2, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(7), addr_t(1)); + auto blkG = std::make_shared(blk_hash_t(2), 2, vec_blk_t{}, vec_trx_t{trx_hash_t(4)}, sig_t(1), + blk_hash_t(8), addr_t(1)); + auto blkH = std::make_shared(blk_hash_t(6), 5, vec_blk_t{blk_hash_t(8), blk_hash_t(10)}, vec_trx_t{}, + sig_t(1), blk_hash_t(9), addr_t(1)); + auto blkI = std::make_shared(blk_hash_t(11), 4, vec_blk_t{blk_hash_t(4)}, vec_trx_t{}, sig_t(1), + blk_hash_t(10), addr_t(1)); + auto blkJ = + std::make_shared(blk_hash_t(7), 3, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(11), addr_t(1)); + auto blkK = + std::make_shared(blk_hash_t(10), 5, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(12), addr_t(1)); + + const auto blkA_hash = blkA->getHash(); + const auto blkC_hash = blkC->getHash(); + const auto blkE_hash = blkE->getHash(); + const auto blkH_hash = blkH->getHash(); + const auto blkK_hash = blkK->getHash(); mgr->addDagBlock(std::move(blkA)); mgr->addDagBlock(std::move(blkB)); @@ -419,15 +454,17 @@ TEST_F(DagTest, get_latest_pivot_tips) { auto trx_mgr = std::make_shared(FullNodeConfig(), db_ptr, nullptr, addr_t()); auto pbft_chain = std::make_shared(addr_t(), db_ptr); const blk_hash_t GENESIS = node_cfgs[0].genesis.dag_genesis_block.getHash(); - auto mgr = std::make_shared(node_cfgs[0].genesis.dag_genesis_block, addr_t(), - node_cfgs[0].genesis.sortition, node_cfgs[0].genesis.dag, trx_mgr, pbft_chain, - nullptr, db_ptr, nullptr, 100000, node_cfgs[0].genesis.state); - - DagBlock blk2(GENESIS, 1, {}, {}, sig_t(1), blk_hash_t(2), addr_t(15)); - DagBlock blk3(blk_hash_t(2), 2, {}, {}, sig_t(1), 
blk_hash_t(3), addr_t(15)); - DagBlock blk4(GENESIS, 1, {}, {}, sig_t(1), blk_hash_t(4), addr_t(15)); - DagBlock blk5(blk_hash_t(4), 2, {}, {}, sig_t(1), blk_hash_t(5), addr_t(15)); - DagBlock blk6(blk_hash_t(2), 3, {blk_hash_t(5)}, {}, sig_t(1), blk_hash_t(6), addr_t(15)); + node_cfgs[0].genesis.pbft.gas_limit = 100000; + auto mgr = std::make_shared(node_cfgs[0], addr_t(), trx_mgr, pbft_chain, nullptr, db_ptr, nullptr); + + auto blk2 = std::make_shared(GENESIS, 1, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(2), addr_t(15)); + auto blk3 = + std::make_shared(blk_hash_t(2), 2, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(3), addr_t(15)); + auto blk4 = std::make_shared(GENESIS, 1, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(4), addr_t(15)); + auto blk5 = + std::make_shared(blk_hash_t(4), 2, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(5), addr_t(15)); + auto blk6 = std::make_shared(blk_hash_t(2), 3, vec_blk_t{blk_hash_t(5)}, vec_trx_t{}, sig_t(1), + blk_hash_t(6), addr_t(15)); mgr->addDagBlock(std::move(blk2)); mgr->addDagBlock(std::move(blk3)); mgr->addDagBlock(std::move(blk4)); @@ -446,9 +483,8 @@ TEST_F(DagTest, initial_pivot) { auto db_ptr = std::make_shared(data_dir / "db"); auto trx_mgr = std::make_shared(FullNodeConfig(), db_ptr, nullptr, addr_t()); auto pbft_chain = std::make_shared(addr_t(), db_ptr); - auto mgr = std::make_shared(node_cfgs[0].genesis.dag_genesis_block, addr_t(), - node_cfgs[0].genesis.sortition, node_cfgs[0].genesis.dag, trx_mgr, pbft_chain, - nullptr, db_ptr, nullptr, 100000, node_cfgs[0].genesis.state); + node_cfgs[0].genesis.pbft.gas_limit = 100000; + auto mgr = std::make_shared(node_cfgs[0], addr_t(), trx_mgr, pbft_chain, nullptr, db_ptr, nullptr); auto pt = mgr->getLatestPivotAndTips(); @@ -458,7 +494,7 @@ TEST_F(DagTest, initial_pivot) { } // namespace taraxa::core_tests using namespace taraxa; -int main(int argc, char** argv) { +int main(int argc, char **argv) { static_init(); auto logging = 
logger::createDefaultLoggingConfig(); logging.verbosity = logger::Verbosity::Error; diff --git a/tests/final_chain_test.cpp b/tests/final_chain_test.cpp index 283d016813..376ce2fa92 100644 --- a/tests/final_chain_test.cpp +++ b/tests/final_chain_test.cpp @@ -1,10 +1,12 @@ +#include "final_chain/final_chain.hpp" + #include #include #include "common/constants.hpp" +#include "common/encoding_solidity.hpp" #include "common/vrf_wrapper.hpp" #include "config/config.hpp" -#include "final_chain/final_chain_impl.hpp" #include "final_chain/trie_common.hpp" #include "libdevcore/CommonJS.h" #include "network/rpc/eth/Eth.h" @@ -25,7 +27,7 @@ struct advance_check_opts { struct FinalChainTest : WithDataDir { std::shared_ptr db{new DbStorage(data_dir / "db")}; FullNodeConfig cfg = FullNodeConfig(); - std::shared_ptr SUT; + std::shared_ptr SUT; bool assume_only_toplevel_transfers = true; std::unordered_map expected_balances; uint64_t expected_blk_num = 0; @@ -45,11 +47,11 @@ struct FinalChainTest : WithDataDir { } void init() { - SUT = std::make_shared(db, cfg, addr_t{}); + SUT = std::make_shared(db, cfg, addr_t{}); const auto& effective_balances = effective_initial_balances(cfg.genesis.state); cfg.genesis.state.dpos.yield_percentage = 0; for (const auto& [addr, _] : cfg.genesis.state.initial_balances) { - auto acc_actual = SUT->get_account(addr); + auto acc_actual = SUT->getAccount(addr); ASSERT_TRUE(acc_actual); const auto expected_bal = effective_balances.at(addr); ASSERT_EQ(acc_actual->balance, expected_bal); @@ -64,7 +66,8 @@ struct FinalChainTest : WithDataDir { trx_hashes.emplace_back(trx->getHash()); } - DagBlock dag_blk({}, {}, {}, trx_hashes, {}, {}, dag_proposer_keys.secret()); + auto dag_blk = std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, trx_hashes, 0, VdfSortition{}, + dag_proposer_keys.secret()); db->saveDagBlock(dag_blk); std::vector reward_votes_hashes; auto pbft_block = @@ -85,19 +88,19 @@ struct FinalChainTest : WithDataDir { 
db->savePeriodData(period_data, batch); db->commitWriteBatch(batch); - auto result = SUT->finalize(std::move(period_data), {dag_blk.getHash()}).get(); + auto result = SUT->finalize(std::move(period_data), {dag_blk->getHash()}).get(); const auto& blk_h = *result->final_chain_blk; - EXPECT_EQ(util::rlp_enc(blk_h), util::rlp_enc(*SUT->block_header(blk_h.number))); - EXPECT_EQ(util::rlp_enc(blk_h), util::rlp_enc(*SUT->block_header())); + EXPECT_EQ(util::rlp_enc(blk_h), util::rlp_enc(*SUT->blockHeader(blk_h.number))); + EXPECT_EQ(util::rlp_enc(blk_h), util::rlp_enc(*SUT->blockHeader())); const auto& receipts = result->trx_receipts; - EXPECT_EQ(blk_h.hash, SUT->block_header()->hash); - EXPECT_EQ(blk_h.hash, SUT->block_hash()); - EXPECT_EQ(blk_h.parent_hash, SUT->block_header(expected_blk_num - 1)->hash); + EXPECT_EQ(blk_h.hash, SUT->blockHeader()->hash); + EXPECT_EQ(blk_h.hash, SUT->blockHash()); + EXPECT_EQ(blk_h.parent_hash, SUT->blockHeader(expected_blk_num - 1)->hash); EXPECT_EQ(blk_h.number, expected_blk_num); - EXPECT_EQ(blk_h.number, SUT->last_block_number()); + EXPECT_EQ(blk_h.number, SUT->lastBlockNumber()); EXPECT_EQ(SUT->transactionCount(blk_h.number), trxs.size()); for (size_t i = 0; i < trxs.size(); i++) EXPECT_EQ(*SUT->transactions(blk_h.number)[i], *trxs[i]); - EXPECT_EQ(*SUT->block_number(*SUT->block_hash(blk_h.number)), expected_blk_num); + EXPECT_EQ(*SUT->blockNumber(*SUT->blockHash(blk_h.number)), expected_blk_num); EXPECT_EQ(blk_h.author, pbft_block->getBeneficiary()); EXPECT_EQ(blk_h.timestamp, pbft_block->getTimestamp()); EXPECT_EQ(receipts.size(), trxs.size()); @@ -111,8 +114,8 @@ struct FinalChainTest : WithDataDir { EXPECT_EQ(blk_h.extra_data, pbft_block->getExtraDataRlp()); EXPECT_EQ(blk_h.nonce(), Nonce()); EXPECT_EQ(blk_h.difficulty(), 0); - EXPECT_EQ(blk_h.mix_hash(), h256()); - EXPECT_EQ(blk_h.uncles_hash(), EmptyRLPListSHA3()); + EXPECT_EQ(blk_h.mixHash(), h256()); + EXPECT_EQ(blk_h.unclesHash(), EmptyRLPListSHA3()); 
EXPECT_TRUE(!blk_h.state_root.isZero()); LogBloom expected_block_log_bloom; std::unordered_map expected_balance_changes; @@ -124,7 +127,7 @@ struct FinalChainTest : WithDataDir { if (!opts.expect_to_fail) { EXPECT_TRUE(r.gas_used != 0); } - EXPECT_EQ(util::rlp_enc(r), util::rlp_enc(*SUT->transaction_receipt(trx->getHash()))); + EXPECT_EQ(util::rlp_enc(r), util::rlp_enc(*SUT->transactionReceipt(trx->getHash()))); cumulative_gas_used_actual += r.gas_used; if (assume_only_toplevel_transfers && trx->getValue() != 0 && r.status_code == 1) { const auto& sender = trx->getSender(); @@ -134,10 +137,10 @@ struct FinalChainTest : WithDataDir { all_addrs_w_changed_balance.insert(sender); all_addrs_w_changed_balance.insert(receiver); expected_balances[receiver] += trx->getValue(); - if (SUT->get_account(sender)->code_size == 0) { + if (SUT->getAccount(sender)->code_size == 0) { expected_balance_changes[sender] = expected_balances[sender]; } - if (SUT->get_account(receiver)->code_size == 0) { + if (SUT->getAccount(receiver)->code_size == 0) { expected_balance_changes[receiver] = expected_balances[receiver]; } } @@ -152,7 +155,7 @@ struct FinalChainTest : WithDataDir { EXPECT_EQ(r.bloom(), LogBloom()); } expected_block_log_bloom |= r.bloom(); - auto trx_loc = *SUT->transaction_location(trx->getHash()); + auto trx_loc = *SUT->transactionLocation(trx->getHash()); EXPECT_EQ(trx_loc.period, blk_h.number); EXPECT_EQ(trx_loc.position, i); } @@ -163,7 +166,7 @@ struct FinalChainTest : WithDataDir { EXPECT_EQ(blk_h.log_bloom, expected_block_log_bloom); if (assume_only_toplevel_transfers) { for (const auto& addr : all_addrs_w_changed_balance) { - EXPECT_EQ(SUT->get_account(addr)->balance, expected_balances[addr]); + EXPECT_EQ(SUT->getAccount(addr)->balance, expected_balances[addr]); } } return result; @@ -307,9 +310,9 @@ TEST_F(FinalChainTest, initial_validators) { init(); const auto votes_per_address = cfg.genesis.state.dpos.validator_maximum_stake / 
cfg.genesis.state.dpos.vote_eligibility_balance_step; - const auto total_votes = SUT->dpos_eligible_total_vote_count(SUT->last_block_number()); + const auto total_votes = SUT->dposEligibleTotalVoteCount(SUT->lastBlockNumber()); for (const auto& vk : validator_keys) { - const auto address_votes = SUT->dpos_eligible_vote_count(SUT->last_block_number(), vk.address()); + const auto address_votes = SUT->dposEligibleVoteCount(SUT->lastBlockNumber(), vk.address()); EXPECT_EQ(votes_per_address, address_votes); EXPECT_EQ(validator_keys.size() * votes_per_address, total_votes); } @@ -334,13 +337,13 @@ TEST_F(FinalChainTest, nonce_test) { advance({trx3}); advance({trx4}); - ASSERT_EQ(SUT->get_account(addr)->nonce.convert_to(), 4); + ASSERT_EQ(SUT->getAccount(addr)->nonce.convert_to(), 4); // nonce_skipping is enabled, ok auto trx6 = std::make_shared(6, 100, 0, 100000, dev::bytes(), sk, receiver_addr); advance({trx6}); - ASSERT_EQ(SUT->get_account(addr)->nonce.convert_to(), 7); + ASSERT_EQ(SUT->getAccount(addr)->nonce.convert_to(), 7); // nonce is lower, fail auto trx5 = std::make_shared(5, 101, 0, 100000, dev::bytes(), sk, receiver_addr); @@ -362,10 +365,10 @@ TEST_F(FinalChainTest, nonce_skipping) { auto trx4 = std::make_shared(3, 100, 0, 100000, dev::bytes(), sk, receiver_addr); advance({trx1}); - ASSERT_EQ(SUT->get_account(addr)->nonce.convert_to(), 1); + ASSERT_EQ(SUT->getAccount(addr)->nonce.convert_to(), 1); advance({trx3}); - ASSERT_EQ(SUT->get_account(addr)->nonce.convert_to(), 3); + ASSERT_EQ(SUT->getAccount(addr)->nonce.convert_to(), 3); // fail transaction with the same nonce advance({trx3}, {false, false, true}); @@ -373,10 +376,10 @@ TEST_F(FinalChainTest, nonce_skipping) { // fail transaction with lower nonce advance({trx2}, {false, false, true}); - ASSERT_EQ(SUT->get_account(addr)->nonce.convert_to(), 3); + ASSERT_EQ(SUT->getAccount(addr)->nonce.convert_to(), 3); advance({trx4}); - ASSERT_EQ(SUT->get_account(addr)->nonce.convert_to(), 4); + 
ASSERT_EQ(SUT->getAccount(addr)->nonce.convert_to(), 4); } TEST_F(FinalChainTest, exec_trx_with_nonce_from_api) { @@ -398,7 +401,7 @@ TEST_F(FinalChainTest, exec_trx_with_nonce_from_api) { auto trx = std::make_shared(nonce, 1, 0, 1000000, dev::fromHex(samples::greeter_contract_code), sk); auto result = advance({trx}, {false, false, true}); } - auto account = SUT->get_account(addr); + auto account = SUT->getAccount(addr); ASSERT_EQ(account->nonce, nonce + 1); auto trx = std::make_shared(account->nonce, 1, 0, 1000000, dev::fromHex(samples::greeter_contract_code), sk); @@ -469,7 +472,7 @@ TEST_F(FinalChainTest, failed_transaction_fee) { auto trx2_1 = std::make_shared(2, 101, 1, gas, dev::bytes(), sk, receiver); advance({trx1}); - auto blk = SUT->block_header(expected_blk_num); + auto blk = SUT->blockHeader(expected_blk_num); auto proposer_balance = SUT->getBalance(blk->author); EXPECT_EQ(proposer_balance.first, 21000); advance({trx2}); @@ -477,24 +480,24 @@ TEST_F(FinalChainTest, failed_transaction_fee) { { // low nonce trx should fail and consume all gas - auto balance_before = SUT->get_account(addr)->balance; + auto balance_before = SUT->getAccount(addr)->balance; advance({trx2_1}, {false, false, true}); - auto receipt = SUT->transaction_receipt(trx2_1->getHash()); + auto receipt = SUT->transactionReceipt(trx2_1->getHash()); EXPECT_EQ(receipt->gas_used, gas); - EXPECT_EQ(balance_before - SUT->get_account(addr)->balance, receipt->gas_used * trx2_1->getGasPrice()); + EXPECT_EQ(balance_before - SUT->getAccount(addr)->balance, receipt->gas_used * trx2_1->getGasPrice()); } { // transaction gas is bigger then current account balance. 
Use closest int as gas used and decrease sender balance // by gas_used * gas_price - ASSERT_GE(gas, SUT->get_account(addr)->balance); - auto balance_before = SUT->get_account(addr)->balance; + ASSERT_GE(gas, SUT->getAccount(addr)->balance); + auto balance_before = SUT->getAccount(addr)->balance; auto gas_price = 3; auto trx4 = std::make_shared(4, 100, gas_price, gas, dev::bytes(), sk, receiver); advance({trx4}, {false, false, true}); - auto receipt = SUT->transaction_receipt(trx4->getHash()); + auto receipt = SUT->transactionReceipt(trx4->getHash()); EXPECT_GT(balance_before % gas_price, 0); EXPECT_EQ(receipt->gas_used, balance_before / gas_price); - EXPECT_EQ(SUT->get_account(addr)->balance, balance_before % gas_price); + EXPECT_EQ(SUT->getAccount(addr)->balance, balance_before % gas_price); } } @@ -832,9 +835,9 @@ TEST_F(FinalChainTest, remove_jailed_validator_votes_from_total) { init(); const auto votes_per_address = cfg.genesis.state.dpos.validator_maximum_stake / cfg.genesis.state.dpos.vote_eligibility_balance_step; - const auto total_votes_before = SUT->dpos_eligible_total_vote_count(SUT->last_block_number()); + const auto total_votes_before = SUT->dposEligibleTotalVoteCount(SUT->lastBlockNumber()); for (const auto& vk : validator_keys) { - const auto address_votes = SUT->dpos_eligible_vote_count(SUT->last_block_number(), vk.address()); + const auto address_votes = SUT->dposEligibleVoteCount(SUT->lastBlockNumber(), vk.address()); EXPECT_EQ(votes_per_address, address_votes); EXPECT_EQ(validator_keys.size() * votes_per_address, total_votes_before); } @@ -853,7 +856,7 @@ TEST_F(FinalChainTest, remove_jailed_validator_votes_from_total) { advance({}); } - const auto total_votes = SUT->dpos_eligible_total_vote_count(SUT->last_block_number()); + const auto total_votes = SUT->dposEligibleTotalVoteCount(SUT->lastBlockNumber()); EXPECT_EQ(total_votes_before - votes_per_address, total_votes); } diff --git a/tests/full_node_test.cpp b/tests/full_node_test.cpp index 
b5aa407e1d..e495028a71 100644 --- a/tests/full_node_test.cpp +++ b/tests/full_node_test.cpp @@ -11,7 +11,7 @@ #include "cli/config.hpp" #include "cli/tools.hpp" #include "common/constants.hpp" -#include "common/static_init.hpp" +#include "common/init.hpp" #include "dag/dag_block_proposer.hpp" #include "dag/dag_manager.hpp" #include "graphql/mutation.hpp" @@ -55,20 +55,23 @@ struct FullNodeTest : NodesTest {}; TEST_F(FullNodeTest, db_test) { auto db_ptr = std::make_shared(data_dir); auto &db = *db_ptr; - DagBlock blk1(blk_hash_t(1), 1, {}, {trx_hash_t(1), trx_hash_t(2)}, sig_t(777), blk_hash_t(0xB1), addr_t(999)); - DagBlock blk2(blk_hash_t(1), 1, {}, {trx_hash_t(3), trx_hash_t(4)}, sig_t(777), blk_hash_t(0xB2), addr_t(999)); - DagBlock blk3(blk_hash_t(0xB1), 2, {}, {trx_hash_t(5)}, sig_t(777), blk_hash_t(0xB6), addr_t(999)); + auto blk1 = std::make_shared(blk_hash_t(1), 1, vec_blk_t{}, vec_trx_t{trx_hash_t(1), trx_hash_t(2)}, + sig_t(777), blk_hash_t(0xB1), addr_t(999)); + auto blk2 = std::make_shared(blk_hash_t(1), 1, vec_blk_t{}, vec_trx_t{trx_hash_t(3), trx_hash_t(4)}, + sig_t(777), blk_hash_t(0xB2), addr_t(999)); + auto blk3 = std::make_shared(blk_hash_t(0xB1), 2, vec_blk_t{}, vec_trx_t{trx_hash_t(5)}, sig_t(777), + blk_hash_t(0xB6), addr_t(999)); // DAG db.saveDagBlock(blk1); db.saveDagBlock(blk2); db.saveDagBlock(blk3); - EXPECT_EQ(blk1, *db.getDagBlock(blk1.getHash())); - EXPECT_EQ(blk2, *db.getDagBlock(blk2.getHash())); - EXPECT_EQ(blk3, *db.getDagBlock(blk3.getHash())); + EXPECT_EQ(*blk1, *db.getDagBlock(blk1->getHash())); + EXPECT_EQ(*blk2, *db.getDagBlock(blk2->getHash())); + EXPECT_EQ(*blk3, *db.getDagBlock(blk3->getHash())); std::set s1, s2; - s1.emplace(blk1.getHash()); - s1.emplace(blk2.getHash()); - s2.emplace(blk3.getHash()); + s1.emplace(blk1->getHash()); + s1.emplace(blk2->getHash()); + s2.emplace(blk3->getHash()); EXPECT_EQ(db.getBlocksByLevel(1), s1); EXPECT_EQ(db.getBlocksByLevel(2), s2); @@ -371,6 +374,8 @@ TEST_F(FullNodeTest, 
sync_five_nodes) { } } + void dummy_initial_transfer() { coin_transfer(0, dummy_client.getAddress(), 1000000, true); } + auto getIssuedTrxCount() { shared_lock l(m); return issued_trx_count; @@ -417,7 +422,7 @@ TEST_F(FullNodeTest, sync_five_nodes) { void assert_all_transactions_known() { for (auto &n : nodes_) { for (auto &t : transactions) { - auto location = n->getFinalChain()->transaction_location(t); + auto location = n->getFinalChain()->transactionLocation(t); ASSERT_EQ(location.has_value(), true); } } @@ -426,7 +431,7 @@ TEST_F(FullNodeTest, sync_five_nodes) { void assert_all_transactions_success() { for (auto &n : nodes_) { for (auto &t : transactions) { - auto receipt = n->getFinalChain()->transaction_receipt(t); + auto receipt = n->getFinalChain()->transactionReceipt(t); if (receipt->status_code != 1) { auto trx = n->getTransactionManager()->getTransaction(t); std::cout << "failed: " << t.toString() << " sender: " << trx->getSender() << " nonce: " << trx->getNonce() @@ -442,7 +447,7 @@ TEST_F(FullNodeTest, sync_five_nodes) { wait(wait_for, [this](auto &ctx) { for (auto &n : nodes_) { for (auto &t : transactions) { - if (!n->getFinalChain()->transaction_location(t)) { + if (!n->getFinalChain()->transactionLocation(t)) { ctx.fail(); } } @@ -455,8 +460,9 @@ TEST_F(FullNodeTest, sync_five_nodes) { std::vector all_transactions; // transfer some coins to your friends ... 
- auto init_bal = own_effective_genesis_bal(nodes[0]->getConfig()) / nodes.size(); + auto init_bal = own_effective_genesis_bal(nodes[0]->getConfig()) / (nodes.size() + 1); + context.dummy_initial_transfer(); { for (size_t i(1); i < nodes.size(); ++i) { // we shouldn't wait for transaction execution because it could be in alternative dag @@ -830,7 +836,7 @@ TEST_F(FullNodeTest, reconstruct_dag) { taraxa::thisThreadSleepForMilliSeconds(100); for (size_t i = 0; i < num_blks; i++) { - EXPECT_EQ(true, node->getDagManager()->addDagBlock(DagBlock(mock_dags[i])).first); + EXPECT_EQ(true, node->getDagManager()->addDagBlock(mock_dags[i]).first); } taraxa::thisThreadSleepForMilliSeconds(100); @@ -850,7 +856,7 @@ TEST_F(FullNodeTest, reconstruct_dag) { // TODO: pbft does not support node stop yet, to be fixed ... node->getPbftManager()->stop(); for (size_t i = 0; i < num_blks; i++) { - EXPECT_EQ(true, node->getDagManager()->addDagBlock(DagBlock(mock_dags[i])).first); + EXPECT_EQ(true, node->getDagManager()->addDagBlock(mock_dags[i]).first); } taraxa::thisThreadSleepForMilliSeconds(100); vertices3 = node->getDagManager()->getNumVerticesInDag().first; @@ -982,7 +988,7 @@ TEST_F(FullNodeTest, sync_two_nodes2) { // send 1000 trxs try { std::cout << "Sending 1000 trxs ..." << std::endl; - sendTrx(1000, 7778); + sendTrx(1000, 7778, nodes[0]->getSecretKey()); std::cout << "1000 trxs sent ..." 
<< std::endl; } catch (std::exception &e) { @@ -1109,7 +1115,7 @@ TEST_F(FullNodeTest, receive_send_transaction) { auto node = create_nodes(node_cfgs, true /*start*/).front(); try { - sendTrx(1000, 7778); + sendTrx(1000, 7778, node->getSecretKey()); } catch (std::exception &e) { std::cerr << e.what() << std::endl; } @@ -1285,7 +1291,7 @@ TEST_F(FullNodeTest, db_rebuild) { nodes[0]->getTransactionManager()->insertTransaction(dummy_trx); trxs_count++; thisThreadSleepForMilliSeconds(100); - executed_chain_size = nodes[0]->getFinalChain()->last_block_number(); + executed_chain_size = nodes[0]->getFinalChain()->lastBlockNumber(); if (executed_chain_size == 5) { trxs_count_at_pbft_size_5 = nodes[0]->getDB()->getNumTransactionExecuted(); } @@ -1303,7 +1309,7 @@ TEST_F(FullNodeTest, db_rebuild) { ctx.fail(); } }); - executed_chain_size = nodes[0]->getFinalChain()->last_block_number(); + executed_chain_size = nodes[0]->getFinalChain()->lastBlockNumber(); std::cout << "Executed transactions " << trxs_count_at_pbft_size_5 << " at chain size 5" << std::endl; std::cout << "Total executed transactions " << executed_trxs << std::endl; std::cout << "Executed chain size " << executed_chain_size << std::endl; @@ -1316,7 +1322,7 @@ TEST_F(FullNodeTest, db_rebuild) { auto nodes = launch_nodes(node_cfgs); ASSERT_HAPPENS({10s, 100ms}, [&](auto &ctx) { WAIT_EXPECT_EQ(ctx, nodes[0]->getDB()->getNumTransactionExecuted(), trxs_count) - WAIT_EXPECT_EQ(ctx, nodes[0]->getFinalChain()->last_block_number(), executed_chain_size) + WAIT_EXPECT_EQ(ctx, nodes[0]->getFinalChain()->lastBlockNumber(), executed_chain_size) }); } @@ -1326,7 +1332,7 @@ TEST_F(FullNodeTest, db_rebuild) { auto nodes = launch_nodes(node_cfgs); EXPECT_HAPPENS({10s, 100ms}, [&](auto &ctx) { WAIT_EXPECT_EQ(ctx, nodes[0]->getDB()->getNumTransactionExecuted(), trxs_count) - WAIT_EXPECT_EQ(ctx, nodes[0]->getFinalChain()->last_block_number(), executed_chain_size) + WAIT_EXPECT_EQ(ctx, nodes[0]->getFinalChain()->lastBlockNumber(), 
executed_chain_size) }); } @@ -1338,7 +1344,7 @@ TEST_F(FullNodeTest, db_rebuild) { auto nodes = launch_nodes(node_cfgs); EXPECT_HAPPENS({10s, 100ms}, [&](auto &ctx) { WAIT_EXPECT_EQ(ctx, nodes[0]->getDB()->getNumTransactionExecuted(), trxs_count_at_pbft_size_5) - WAIT_EXPECT_EQ(ctx, nodes[0]->getFinalChain()->last_block_number(), 5) + WAIT_EXPECT_EQ(ctx, nodes[0]->getFinalChain()->lastBlockNumber(), 5) }); } @@ -1348,7 +1354,7 @@ TEST_F(FullNodeTest, db_rebuild) { auto nodes = launch_nodes(node_cfgs); EXPECT_HAPPENS({10s, 100ms}, [&](auto &ctx) { WAIT_EXPECT_EQ(ctx, nodes[0]->getDB()->getNumTransactionExecuted(), trxs_count_at_pbft_size_5) - WAIT_EXPECT_EQ(ctx, nodes[0]->getFinalChain()->last_block_number(), 5) + WAIT_EXPECT_EQ(ctx, nodes[0]->getFinalChain()->lastBlockNumber(), 5) }); } } @@ -1437,7 +1443,7 @@ TEST_F(FullNodeTest, light_node) { // broadcast dummy transaction nodes[1]->getTransactionManager()->insertTransaction(dummy_trx); thisThreadSleepForMilliSeconds(200); - nodes[1]->getDagManager()->clearLightNodeHistory(); + nodes[1]->getDagManager()->clearLightNodeHistory(node_cfgs[1].light_node_history); } EXPECT_HAPPENS({10s, 1s}, [&](auto &ctx) { // Verify full node and light node sync without any issues @@ -1639,7 +1645,7 @@ TEST_F(FullNodeTest, graphql_test) { data = service::ScalarArgument::require("data", result); block = service::ScalarArgument::require("block", data); const auto hash = service::StringArgument::require("hash", block); - EXPECT_EQ(nodes[0]->getFinalChain()->block_header(3)->hash.toString(), hash); + EXPECT_EQ(nodes[0]->getFinalChain()->blockHeader(3)->hash.toString(), hash); // Get block hash by number query = R"({ block(number: 2) { transactionAt(index: 0) { hash } } })"_graphql; @@ -1656,7 +1662,7 @@ TEST_F(FullNodeTest, graphql_test) { block = service::ScalarArgument::require("block", data); auto transactionAt = service::ScalarArgument::require("transactionAt", block); const auto hash2 = service::StringArgument::require("hash", 
transactionAt); - EXPECT_EQ(nodes[0]->getFinalChain()->transaction_hashes(2)->at(0).toString(), hash2); + EXPECT_EQ(nodes[0]->getFinalChain()->transactionHashes(2)->at(0).toString(), hash2); } } // namespace taraxa::core_tests diff --git a/tests/network_test.cpp b/tests/network_test.cpp index b831bc0a61..e4ea3bf976 100644 --- a/tests/network_test.cpp +++ b/tests/network_test.cpp @@ -6,8 +6,8 @@ #include #include +#include "common/init.hpp" #include "common/lazy.hpp" -#include "common/static_init.hpp" #include "config/config.hpp" #include "dag/dag.hpp" #include "dag/dag_block_proposer.hpp" @@ -99,8 +99,9 @@ TEST_F(NetworkTest, transfer_lot_of_blocks) { const auto dag_genesis = node1->getConfig().genesis.dag_genesis_block.getHash(); dev::bytes vdf_msg = DagManager::getVdfMessage(dag_genesis, {trxs[0]}); vdf.computeVdfSolution(sortition_params, vdf_msg, false); - DagBlock blk(dag_genesis, proposal_level, {}, {trxs[0]->getHash()}, estimation, vdf, node1->getSecretKey()); - const auto block_hash = blk.getHash(); + auto blk = std::make_shared(dag_genesis, proposal_level, vec_blk_t{}, vec_trx_t{trxs[0]->getHash()}, + estimation, vdf, node1->getSecretKey()); + const auto block_hash = blk->getHash(); dag_mgr1->addDagBlock(std::move(blk), {trxs[0]}); std::vector> dag_blocks; @@ -124,8 +125,8 @@ TEST_F(NetworkTest, transfer_lot_of_blocks) { node1->getTransactionManager()->insertValidatedTransaction(std::move(tx)); } for (size_t i = 0; i < dag_blocks.size(); i++) { - if (dag_mgr1->verifyBlock(*dag_blocks[i]).first == DagManager::VerifyBlockReturnType::Verified) - dag_mgr1->addDagBlock(DagBlock(*dag_blocks[i]), {trxs[i]}); + if (dag_mgr1->verifyBlock(dag_blocks[i]).first == DagManager::VerifyBlockReturnType::Verified) + dag_mgr1->addDagBlock(dag_blocks[i], {trxs[i]}); } wait({1s, 200ms}, [&](auto& ctx) { WAIT_EXPECT_NE(ctx, dag_mgr1->getDagBlock(block_hash), nullptr) }); const auto node1_period = node1->getPbftChain()->getPbftChainSize(); @@ -164,9 +165,10 @@ 
TEST_F(NetworkTest, propagate_block) { const auto dag_genesis = node1->getConfig().genesis.dag_genesis_block.getHash(); dev::bytes vdf_msg = DagManager::getVdfMessage(dag_genesis, {trxs[0]}); vdf.computeVdfSolution(sortition_params, vdf_msg, false); - DagBlock blk(dag_genesis, proposal_level, {}, {trxs[0]->getHash()}, estimation, vdf, node1->getSecretKey()); + auto blk = std::make_shared(dag_genesis, proposal_level, vec_blk_t{}, vec_trx_t{trxs[0]->getHash()}, + estimation, vdf, node1->getSecretKey()); - const auto block_hash = blk.getHash(); + const auto block_hash = blk->getHash(); // Add block gossip it to connected peers dag_mgr1->addDagBlock(std::move(blk), {trxs[0]}); @@ -413,7 +415,7 @@ TEST_F(NetworkTest, node_sync) { // Allow node to start up taraxa::thisThreadSleepForMilliSeconds(1000); - std::vector>> blks; + std::vector, std::shared_ptr>> blks; // Generate DAG blocks const auto dag_genesis = node1->getConfig().genesis.dag_genesis_block.getHash(); const auto sk = node1->getSecretKey(); @@ -428,43 +430,48 @@ TEST_F(NetworkTest, node_sync) { dev::bytes vdf_msg1 = DagManager::getVdfMessage(dag_genesis, {g_signed_trx_samples[1]}); vdf1.computeVdfSolution(vdf_config, vdf_msg1, false); - DagBlock blk1(dag_genesis, propose_level, {}, {g_signed_trx_samples[1]->getHash()}, estimation, vdf1, sk); + auto blk1 = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, + vec_trx_t{g_signed_trx_samples[1]->getHash()}, estimation, vdf1, sk); propose_level = 2; vdf_sortition::VdfSortition vdf2(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg2 = DagManager::getVdfMessage(blk1.getHash(), {g_signed_trx_samples[2]}); + dev::bytes vdf_msg2 = DagManager::getVdfMessage(blk1->getHash(), {g_signed_trx_samples[2]}); vdf2.computeVdfSolution(vdf_config, vdf_msg2, false); - DagBlock blk2(blk1.getHash(), propose_level, {}, {g_signed_trx_samples[2]->getHash()}, estimation, vdf2, sk); + auto blk2 = 
std::make_shared(blk1->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{g_signed_trx_samples[2]->getHash()}, estimation, vdf2, sk); propose_level = 3; vdf_sortition::VdfSortition vdf3(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg3 = DagManager::getVdfMessage(blk2.getHash(), {g_signed_trx_samples[3]}); + dev::bytes vdf_msg3 = DagManager::getVdfMessage(blk2->getHash(), {g_signed_trx_samples[3]}); vdf3.computeVdfSolution(vdf_config, vdf_msg3, false); - DagBlock blk3(blk2.getHash(), propose_level, {}, {g_signed_trx_samples[3]->getHash()}, estimation, vdf3, sk); + auto blk3 = std::make_shared(blk2->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{g_signed_trx_samples[3]->getHash()}, estimation, vdf3, sk); propose_level = 4; vdf_sortition::VdfSortition vdf4(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg4 = DagManager::getVdfMessage(blk3.getHash(), {g_signed_trx_samples[4]}); + dev::bytes vdf_msg4 = DagManager::getVdfMessage(blk3->getHash(), {g_signed_trx_samples[4]}); vdf4.computeVdfSolution(vdf_config, vdf_msg4, false); - DagBlock blk4(blk3.getHash(), propose_level, {}, {g_signed_trx_samples[4]->getHash()}, estimation, vdf4, sk); + auto blk4 = std::make_shared(blk3->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{g_signed_trx_samples[4]->getHash()}, estimation, vdf4, sk); propose_level = 5; vdf_sortition::VdfSortition vdf5(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg5 = DagManager::getVdfMessage(blk4.getHash(), {g_signed_trx_samples[5]}); + dev::bytes vdf_msg5 = DagManager::getVdfMessage(blk4->getHash(), {g_signed_trx_samples[5]}); vdf5.computeVdfSolution(vdf_config, vdf_msg5, false); - DagBlock blk5(blk4.getHash(), propose_level, {}, {g_signed_trx_samples[5]->getHash()}, estimation, vdf5, sk); + auto blk5 = std::make_shared(blk4->getHash(), propose_level, 
vec_blk_t{}, + vec_trx_t{g_signed_trx_samples[5]->getHash()}, estimation, vdf5, sk); propose_level = 6; vdf_sortition::VdfSortition vdf6(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg6 = DagManager::getVdfMessage(blk5.getHash(), {g_signed_trx_samples[6]}); + dev::bytes vdf_msg6 = DagManager::getVdfMessage(blk5->getHash(), {g_signed_trx_samples[6]}); vdf6.computeVdfSolution(vdf_config, vdf_msg6, false); - DagBlock blk6(blk5.getHash(), propose_level, {blk4.getHash(), blk3.getHash()}, {g_signed_trx_samples[6]->getHash()}, - estimation, vdf6, sk); + auto blk6 = std::make_shared(blk5->getHash(), propose_level, vec_blk_t{blk4->getHash(), blk3->getHash()}, + vec_trx_t{g_signed_trx_samples[6]->getHash()}, estimation, vdf6, sk); blks.push_back(std::make_pair(blk1, g_signed_trx_samples[1])); blks.push_back(std::make_pair(blk2, g_signed_trx_samples[2])); @@ -521,18 +528,19 @@ TEST_F(NetworkTest, node_pbft_sync) { vdf_sortition::VdfSortition vdf1(vdf_config, vrf_sk, getRlpBytes(level), 1, 1); dev::bytes vdf_msg1 = DagManager::getVdfMessage(dag_genesis, {g_signed_trx_samples[0], g_signed_trx_samples[1]}); vdf1.computeVdfSolution(vdf_config, vdf_msg1, false); - DagBlock blk1(dag_genesis, 1, {}, {g_signed_trx_samples[0]->getHash(), g_signed_trx_samples[1]->getHash()}, 0, vdf1, - sk); + auto blk1 = std::make_shared( + dag_genesis, 1, vec_blk_t{}, vec_trx_t{g_signed_trx_samples[0]->getHash(), g_signed_trx_samples[1]->getHash()}, 0, + vdf1, sk); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[0])); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[1])); - node1->getDagManager()->verifyBlock(DagBlock(blk1)); - node1->getDagManager()->addDagBlock(DagBlock(blk1)); + node1->getDagManager()->verifyBlock(blk1); + node1->getDagManager()->addDagBlock(blk1); dev::RLPStream order_stream(1); order_stream.appendList(1); - order_stream 
<< blk1.getHash(); + order_stream << blk1->getHash(); - PbftBlock pbft_block1(prev_block_hash, blk1.getHash(), dev::sha3(order_stream.out()), kNullBlockHash, period, + PbftBlock pbft_block1(prev_block_hash, blk1->getHash(), dev::sha3(order_stream.out()), kNullBlockHash, period, beneficiary, node1->getSecretKey(), {}, {}); std::vector> votes_for_pbft_blk1; votes_for_pbft_blk1.emplace_back( @@ -557,10 +565,10 @@ TEST_F(NetworkTest, node_pbft_sync) { db1->commitWriteBatch(batch); vec_blk_t order1; - order1.push_back(blk1.getHash()); + order1.push_back(blk1->getHash()); { std::unique_lock dag_lock(node1->getDagManager()->getDagMutex()); - node1->getDagManager()->setDagBlockOrder(blk1.getHash(), level, order1); + node1->getDagManager()->setDagBlockOrder(blk1->getHash(), level, order1); } uint64_t expect_pbft_chain_size = 1; @@ -571,22 +579,23 @@ TEST_F(NetworkTest, node_pbft_sync) { level = 2; vdf_sortition::VdfSortition vdf2(vdf_config, vrf_sk, getRlpBytes(level), 1, 1); - dev::bytes vdf_msg2 = DagManager::getVdfMessage(blk1.getHash(), {g_signed_trx_samples[2], g_signed_trx_samples[3]}); + dev::bytes vdf_msg2 = DagManager::getVdfMessage(blk1->getHash(), {g_signed_trx_samples[2], g_signed_trx_samples[3]}); vdf2.computeVdfSolution(vdf_config, vdf_msg2, false); - DagBlock blk2(blk1.getHash(), 2, {}, {g_signed_trx_samples[2]->getHash(), g_signed_trx_samples[3]->getHash()}, 0, - vdf2, sk); + auto blk2 = std::make_shared( + blk1->getHash(), 2, vec_blk_t{}, + vec_trx_t{g_signed_trx_samples[2]->getHash(), g_signed_trx_samples[3]->getHash()}, 0, vdf2, sk); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[2])); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[3])); - node1->getDagManager()->verifyBlock(DagBlock(blk2)); - node1->getDagManager()->addDagBlock(DagBlock(blk2)); + node1->getDagManager()->verifyBlock(blk2); + node1->getDagManager()->addDagBlock(blk2); batch = 
db1->createWriteBatch(); period = 2; beneficiary = addr_t(654); dev::RLPStream order_stream2(1); order_stream2.appendList(1); - order_stream2 << blk2.getHash(); - PbftBlock pbft_block2(prev_block_hash, blk2.getHash(), dev::sha3(order_stream2.out()), kNullBlockHash, period, + order_stream2 << blk2->getHash(); + PbftBlock pbft_block2(prev_block_hash, blk2->getHash(), dev::sha3(order_stream2.out()), kNullBlockHash, period, beneficiary, node1->getSecretKey(), {}, {}); std::vector> votes_for_pbft_blk2; votes_for_pbft_blk2.emplace_back( @@ -617,10 +626,10 @@ TEST_F(NetworkTest, node_pbft_sync) { db1->commitWriteBatch(batch); vec_blk_t order2; - order2.push_back(blk2.getHash()); + order2.push_back(blk2->getHash()); { std::unique_lock dag_lock(node1->getDagManager()->getDagMutex()); - node1->getDagManager()->setDagBlockOrder(blk2.getHash(), level, order2); + node1->getDagManager()->setDagBlockOrder(blk2->getHash(), level, order2); } expect_pbft_chain_size = 2; @@ -668,18 +677,19 @@ TEST_F(NetworkTest, node_pbft_sync_without_enough_votes) { vdf_sortition::VdfSortition vdf1(vdf_config, vrf_sk, getRlpBytes(level), 1, 1); dev::bytes vdf_msg1 = DagManager::getVdfMessage(dag_genesis, {g_signed_trx_samples[0], g_signed_trx_samples[1]}); vdf1.computeVdfSolution(vdf_config, vdf_msg1, false); - DagBlock blk1(dag_genesis, 1, {}, {g_signed_trx_samples[0]->getHash(), g_signed_trx_samples[1]->getHash()}, 0, vdf1, - sk); + auto blk1 = std::make_shared( + dag_genesis, 1, vec_blk_t{}, vec_trx_t{g_signed_trx_samples[0]->getHash(), g_signed_trx_samples[1]->getHash()}, 0, + vdf1, sk); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[0])); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[1])); - node1->getDagManager()->verifyBlock(DagBlock(blk1)); - node1->getDagManager()->addDagBlock(DagBlock(blk1)); + node1->getDagManager()->verifyBlock(blk1); + node1->getDagManager()->addDagBlock(blk1); 
dev::RLPStream order_stream(1); order_stream.appendList(1); - order_stream << blk1.getHash(); + order_stream << blk1->getHash(); - PbftBlock pbft_block1(prev_block_hash, blk1.getHash(), dev::sha3(order_stream.out()), kNullBlockHash, period, + PbftBlock pbft_block1(prev_block_hash, blk1->getHash(), dev::sha3(order_stream.out()), kNullBlockHash, period, beneficiary, node1->getSecretKey(), {}, {}); const auto pbft_block1_cert_vote = node1->getVoteManager()->generateVote( pbft_block1.getBlockHash(), PbftVoteTypes::cert_vote, pbft_block1.getPeriod(), 1, 3); @@ -707,14 +717,15 @@ TEST_F(NetworkTest, node_pbft_sync_without_enough_votes) { prev_block_hash = pbft_block1.getBlockHash(); level = 2; vdf_sortition::VdfSortition vdf2(vdf_config, vrf_sk, getRlpBytes(level), 1, 1); - dev::bytes vdf_msg2 = DagManager::getVdfMessage(blk1.getHash(), {g_signed_trx_samples[2], g_signed_trx_samples[3]}); + dev::bytes vdf_msg2 = DagManager::getVdfMessage(blk1->getHash(), {g_signed_trx_samples[2], g_signed_trx_samples[3]}); vdf2.computeVdfSolution(vdf_config, vdf_msg2, false); - DagBlock blk2(blk1.getHash(), 2, {}, {g_signed_trx_samples[2]->getHash(), g_signed_trx_samples[3]->getHash()}, 0, - vdf2, sk); + auto blk2 = std::make_shared( + blk1->getHash(), 2, vec_blk_t{}, + vec_trx_t{g_signed_trx_samples[2]->getHash(), g_signed_trx_samples[3]->getHash()}, 0, vdf2, sk); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[2])); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[3])); - node1->getDagManager()->verifyBlock(DagBlock(blk2)); - node1->getDagManager()->addDagBlock(DagBlock(blk2)); + node1->getDagManager()->verifyBlock(blk2); + node1->getDagManager()->addDagBlock(blk2); batch = db1->createWriteBatch(); period = 2; @@ -722,9 +733,9 @@ TEST_F(NetworkTest, node_pbft_sync_without_enough_votes) { dev::RLPStream order_stream2(1); order_stream2.appendList(1); - order_stream2 << blk2.getHash(); + 
order_stream2 << blk2->getHash(); - PbftBlock pbft_block2(prev_block_hash, blk2.getHash(), dev::sha3(order_stream2.out()), kNullBlockHash, period, + PbftBlock pbft_block2(prev_block_hash, blk2->getHash(), dev::sha3(order_stream2.out()), kNullBlockHash, period, beneficiary, node1->getSecretKey(), {}, {}); const auto pbft_block2_cert_vote = node1->getVoteManager()->generateVote( pbft_block2.getBlockHash(), PbftVoteTypes::cert_vote, pbft_block2.getPeriod(), 1, 3); @@ -856,7 +867,7 @@ TEST_F(NetworkTest, pbft_next_votes_sync_in_same_round) { node2->getPbftManager()->setPbftRound(2); // Node 1 broadcast his votes - node1_pbft_mgr->testBroadcatVotesFunctionality(); + node1_pbft_mgr->testBroadcastVotesFunctionality(); // Node 2 should receive votes from node 1, node 1 has its own 2 votes EXPECT_EQ(node1_vote_mgr->getVerifiedVotesSize(), 2); EXPECT_HAPPENS({5s, 100ms}, [&](auto& ctx) { WAIT_EXPECT_EQ(ctx, node2_vote_mgr->getVerifiedVotesSize(), 3) }); @@ -886,71 +897,76 @@ TEST_F(NetworkTest, node_sync_with_transactions) { dev::RLPStream s; dev::bytes vdf_msg1 = DagManager::getVdfMessage(dag_genesis, {g_signed_trx_samples[0], g_signed_trx_samples[1]}); vdf1.computeVdfSolution(vdf_config, vdf_msg1, false); - DagBlock blk1(dag_genesis, propose_level, {}, - {g_signed_trx_samples[0]->getHash(), g_signed_trx_samples[1]->getHash()}, 2 * estimation, vdf1, sk); + auto blk1 = std::make_shared( + dag_genesis, propose_level, vec_blk_t{}, + vec_trx_t{g_signed_trx_samples[0]->getHash(), g_signed_trx_samples[1]->getHash()}, 2 * estimation, vdf1, sk); propose_level = 2; vdf_sortition::VdfSortition vdf2(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg2 = DagManager::getVdfMessage(blk1.getHash(), {g_signed_trx_samples[2]}); + dev::bytes vdf_msg2 = DagManager::getVdfMessage(blk1->getHash(), {g_signed_trx_samples[2]}); vdf2.computeVdfSolution(vdf_config, vdf_msg2, false); - DagBlock blk2(blk1.getHash(), propose_level, {}, 
{g_signed_trx_samples[2]->getHash()}, estimation, vdf2, sk); + auto blk2 = std::make_shared(blk1->getHash(), propose_level, vec_trx_t{}, + vec_trx_t{g_signed_trx_samples[2]->getHash()}, estimation, vdf2, sk); propose_level = 3; vdf_sortition::VdfSortition vdf3(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg3 = DagManager::getVdfMessage(blk2.getHash(), {g_signed_trx_samples[3]}); + dev::bytes vdf_msg3 = DagManager::getVdfMessage(blk2->getHash(), {g_signed_trx_samples[3]}); vdf3.computeVdfSolution(vdf_config, vdf_msg3, false); - DagBlock blk3(blk2.getHash(), propose_level, {}, {g_signed_trx_samples[3]->getHash()}, estimation, vdf3, sk); + auto blk3 = std::make_shared(blk2->getHash(), propose_level, vec_trx_t{}, + vec_trx_t{g_signed_trx_samples[3]->getHash()}, estimation, vdf3, sk); propose_level = 4; vdf_sortition::VdfSortition vdf4(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg4 = DagManager::getVdfMessage(blk3.getHash(), {g_signed_trx_samples[4]}); + dev::bytes vdf_msg4 = DagManager::getVdfMessage(blk3->getHash(), {g_signed_trx_samples[4]}); vdf4.computeVdfSolution(vdf_config, vdf_msg4, false); - DagBlock blk4(blk3.getHash(), propose_level, {}, {g_signed_trx_samples[4]->getHash()}, estimation, vdf4, sk); + auto blk4 = std::make_shared(blk3->getHash(), propose_level, vec_trx_t{}, + vec_trx_t{g_signed_trx_samples[4]->getHash()}, estimation, vdf4, sk); propose_level = 5; vdf_sortition::VdfSortition vdf5(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg5 = DagManager::getVdfMessage(blk4.getHash(), {g_signed_trx_samples[5], g_signed_trx_samples[6], - g_signed_trx_samples[7], g_signed_trx_samples[8]}); + dev::bytes vdf_msg5 = DagManager::getVdfMessage(blk4->getHash(), {g_signed_trx_samples[5], g_signed_trx_samples[6], + g_signed_trx_samples[7], g_signed_trx_samples[8]}); 
vdf5.computeVdfSolution(vdf_config, vdf_msg5, false); - DagBlock blk5(blk4.getHash(), propose_level, {}, - {g_signed_trx_samples[5]->getHash(), g_signed_trx_samples[6]->getHash(), - g_signed_trx_samples[7]->getHash(), g_signed_trx_samples[8]->getHash()}, - 4 * estimation, vdf5, sk); + auto blk5 = + std::make_shared(blk4->getHash(), propose_level, vec_trx_t{}, + vec_trx_t{g_signed_trx_samples[5]->getHash(), g_signed_trx_samples[6]->getHash(), + g_signed_trx_samples[7]->getHash(), g_signed_trx_samples[8]->getHash()}, + 4 * estimation, vdf5, sk); propose_level = 6; vdf_sortition::VdfSortition vdf6(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg6 = DagManager::getVdfMessage(blk5.getHash(), {g_signed_trx_samples[9]}); + dev::bytes vdf_msg6 = DagManager::getVdfMessage(blk5->getHash(), {g_signed_trx_samples[9]}); vdf6.computeVdfSolution(vdf_config, vdf_msg6, false); - DagBlock blk6(blk5.getHash(), propose_level, {blk4.getHash(), blk3.getHash()}, {g_signed_trx_samples[9]->getHash()}, - estimation, vdf6, sk); + auto blk6 = std::make_shared(blk5->getHash(), propose_level, vec_trx_t{blk4->getHash(), blk3->getHash()}, + vec_trx_t{g_signed_trx_samples[9]->getHash()}, estimation, vdf6, sk); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[0])); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[1])); EXPECT_EQ(node1->getDagManager()->verifyBlock(std::move(blk1)).first, DagManager::VerifyBlockReturnType::Verified); - node1->getDagManager()->addDagBlock(DagBlock(blk1)); + node1->getDagManager()->addDagBlock(blk1); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[2])); EXPECT_EQ(node1->getDagManager()->verifyBlock(std::move(blk2)).first, DagManager::VerifyBlockReturnType::Verified); - node1->getDagManager()->addDagBlock(DagBlock(blk2)); + node1->getDagManager()->addDagBlock(blk2); 
node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[3])); EXPECT_EQ(node1->getDagManager()->verifyBlock(std::move(blk3)).first, DagManager::VerifyBlockReturnType::Verified); - node1->getDagManager()->addDagBlock(DagBlock(blk3)); + node1->getDagManager()->addDagBlock(blk3); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[4])); EXPECT_EQ(node1->getDagManager()->verifyBlock(std::move(blk4)).first, DagManager::VerifyBlockReturnType::Verified); - node1->getDagManager()->addDagBlock(DagBlock(blk4)); + node1->getDagManager()->addDagBlock(blk4); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[5])); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[6])); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[7])); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[8])); EXPECT_EQ(node1->getDagManager()->verifyBlock(std::move(blk5)).first, DagManager::VerifyBlockReturnType::Verified); - node1->getDagManager()->addDagBlock(DagBlock(blk5)); + node1->getDagManager()->addDagBlock(blk5); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[9])); EXPECT_EQ(node1->getDagManager()->verifyBlock(std::move(blk6)).first, DagManager::VerifyBlockReturnType::Verified); - node1->getDagManager()->addDagBlock(DagBlock(blk6)); + node1->getDagManager()->addDagBlock(blk6); // To make sure blocks are stored before starting node 2 taraxa::thisThreadSleepForMilliSeconds(1000); @@ -974,7 +990,7 @@ TEST_F(NetworkTest, node_sync2) { auto node_cfgs = make_node_cfgs(2, 1, 5); auto node1 = create_nodes({node_cfgs[0]}, true /*start*/).front(); - std::vector blks; + std::vector> blks; // Generate DAG blocks const auto dag_genesis = node1->getConfig().genesis.dag_genesis_block.getHash(); const auto 
sk = node1->getSecretKey(); @@ -989,8 +1005,9 @@ TEST_F(NetworkTest, node_sync2) { 1, 1); dev::bytes vdf_msg = DagManager::getVdfMessage(dag_genesis, {transactions[0], transactions[1]}); vdf1.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk1(dag_genesis, propose_level, {}, {transactions[0]->getHash(), transactions[1]->getHash()}, - 2 * estimation, vdf1, sk); + auto blk1 = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, + vec_trx_t{transactions[0]->getHash(), transactions[1]->getHash()}, + 2 * estimation, vdf1, sk); SharedTransactions tr1({transactions[0], transactions[1]}); // DAG block2 propose_level = 1; @@ -998,98 +1015,109 @@ TEST_F(NetworkTest, node_sync2) { 1, 1); vdf_msg = DagManager::getVdfMessage(dag_genesis, {transactions[2], transactions[3]}); vdf2.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk2(dag_genesis, propose_level, {}, {transactions[2]->getHash(), transactions[3]->getHash()}, - 2 * estimation, vdf2, sk); + auto blk2 = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, + vec_trx_t{transactions[2]->getHash(), transactions[3]->getHash()}, + 2 * estimation, vdf2, sk); SharedTransactions tr2({transactions[2], transactions[3]}); // DAG block3 propose_level = 2; vdf_sortition::VdfSortition vdf3(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk1.getHash(), {transactions[4], transactions[5]}); + vdf_msg = DagManager::getVdfMessage(blk1->getHash(), {transactions[4], transactions[5]}); vdf3.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk3(blk1.getHash(), propose_level, {}, {transactions[4]->getHash(), transactions[5]->getHash()}, - 2 * estimation, vdf3, sk); + auto blk3 = std::make_shared(blk1->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[4]->getHash(), transactions[5]->getHash()}, + 2 * estimation, vdf3, sk); SharedTransactions tr3({transactions[4], transactions[5]}); // DAG block4 
propose_level = 3; vdf_sortition::VdfSortition vdf4(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk3.getHash(), {transactions[6], transactions[7]}); + vdf_msg = DagManager::getVdfMessage(blk3->getHash(), {transactions[6], transactions[7]}); vdf4.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk4(blk3.getHash(), propose_level, {}, {transactions[6]->getHash(), transactions[7]->getHash()}, - 2 * estimation, vdf4, sk); + auto blk4 = std::make_shared(blk3->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[6]->getHash(), transactions[7]->getHash()}, + 2 * estimation, vdf4, sk); SharedTransactions tr4({transactions[6], transactions[7]}); // DAG block5 propose_level = 2; vdf_sortition::VdfSortition vdf5(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk2.getHash(), {transactions[8], transactions[9]}); + vdf_msg = DagManager::getVdfMessage(blk2->getHash(), {transactions[8], transactions[9]}); vdf5.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk5(blk2.getHash(), propose_level, {}, {transactions[8]->getHash(), transactions[9]->getHash()}, - 2 * estimation, vdf5, sk); + auto blk5 = std::make_shared(blk2->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[8]->getHash(), transactions[9]->getHash()}, + 2 * estimation, vdf5, sk); SharedTransactions tr5({transactions[8], transactions[9]}); // DAG block6 propose_level = 2; vdf_sortition::VdfSortition vdf6(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk1.getHash(), {transactions[10], transactions[11]}); + vdf_msg = DagManager::getVdfMessage(blk1->getHash(), {transactions[10], transactions[11]}); vdf6.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk6(blk1.getHash(), propose_level, {}, {transactions[10]->getHash(), 
transactions[11]->getHash()}, - 2 * estimation, vdf6, sk); + auto blk6 = std::make_shared(blk1->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[10]->getHash(), transactions[11]->getHash()}, + 2 * estimation, vdf6, sk); SharedTransactions tr6({transactions[10], transactions[11]}); // DAG block7 propose_level = 3; vdf_sortition::VdfSortition vdf7(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk6.getHash(), {transactions[12], transactions[13]}); + vdf_msg = DagManager::getVdfMessage(blk6->getHash(), {transactions[12], transactions[13]}); vdf7.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk7(blk6.getHash(), propose_level, {}, {transactions[12]->getHash(), transactions[13]->getHash()}, - 2 * estimation, vdf7, sk); + auto blk7 = std::make_shared(blk6->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[12]->getHash(), transactions[13]->getHash()}, + 2 * estimation, vdf7, sk); SharedTransactions tr7({transactions[12], transactions[13]}); // DAG block8 propose_level = 4; vdf_sortition::VdfSortition vdf8(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk1.getHash(), {transactions[14], transactions[15]}); + vdf_msg = DagManager::getVdfMessage(blk1->getHash(), {transactions[14], transactions[15]}); vdf8.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk8(blk1.getHash(), propose_level, {blk7.getHash()}, - {transactions[14]->getHash(), transactions[15]->getHash()}, 2 * estimation, vdf8, sk); + auto blk8 = std::make_shared(blk1->getHash(), propose_level, vec_blk_t{blk7->getHash()}, + vec_trx_t{transactions[14]->getHash(), transactions[15]->getHash()}, + 2 * estimation, vdf8, sk); SharedTransactions tr8({transactions[14], transactions[15]}); // DAG block9 propose_level = 2; vdf_sortition::VdfSortition vdf9(vdf_config, vrf_sk, 
VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk1.getHash(), {transactions[16], transactions[17]}); + vdf_msg = DagManager::getVdfMessage(blk1->getHash(), {transactions[16], transactions[17]}); vdf9.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk9(blk1.getHash(), propose_level, {}, {transactions[16]->getHash(), transactions[17]->getHash()}, - 2 * estimation, vdf9, sk); + auto blk9 = std::make_shared(blk1->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[16]->getHash(), transactions[17]->getHash()}, + 2 * estimation, vdf9, sk); SharedTransactions tr9({transactions[16], transactions[17]}); // DAG block10 propose_level = 5; vdf_sortition::VdfSortition vdf10(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk8.getHash(), {transactions[18], transactions[19]}); + vdf_msg = DagManager::getVdfMessage(blk8->getHash(), {transactions[18], transactions[19]}); vdf10.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk10(blk8.getHash(), propose_level, {}, {transactions[18]->getHash(), transactions[19]->getHash()}, - 2 * estimation, vdf10, sk); + auto blk10 = std::make_shared(blk8->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[18]->getHash(), transactions[19]->getHash()}, + 2 * estimation, vdf10, sk); SharedTransactions tr10({transactions[18], transactions[19]}); // DAG block11 propose_level = 3; vdf_sortition::VdfSortition vdf11(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk3.getHash(), {transactions[20], transactions[21]}); + vdf_msg = DagManager::getVdfMessage(blk3->getHash(), {transactions[20], transactions[21]}); vdf11.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk11(blk3.getHash(), propose_level, {}, {transactions[20]->getHash(), transactions[21]->getHash()}, - 2 * 
estimation, vdf11, sk); + auto blk11 = std::make_shared(blk3->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[20]->getHash(), transactions[21]->getHash()}, + 2 * estimation, vdf11, sk); SharedTransactions tr11({transactions[20], transactions[21]}); // DAG block12 propose_level = 3; vdf_sortition::VdfSortition vdf12(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk5.getHash(), {transactions[22], transactions[23]}); + vdf_msg = DagManager::getVdfMessage(blk5->getHash(), {transactions[22], transactions[23]}); vdf12.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk12(blk5.getHash(), propose_level, {}, {transactions[22]->getHash(), transactions[23]->getHash()}, - 2 * estimation, vdf12, sk); + auto blk12 = std::make_shared(blk5->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[22]->getHash(), transactions[23]->getHash()}, + 2 * estimation, vdf12, sk); SharedTransactions tr12({transactions[22], transactions[23]}); blks.push_back(blk1); @@ -1122,7 +1150,7 @@ TEST_F(NetworkTest, node_sync2) { for (size_t i = 0; i < blks.size(); ++i) { for (auto t : trxs[i]) node1->getTransactionManager()->insertValidatedTransaction(std::move(t)); node1->getDagManager()->verifyBlock(std::move(blks[i])); - node1->getDagManager()->addDagBlock(DagBlock(blks[i])); + node1->getDagManager()->addDagBlock(blks[i]); } auto node2 = create_nodes({node_cfgs[1]}, true /*start*/).front(); @@ -1165,7 +1193,7 @@ TEST_F(NetworkTest, transaction_gossip_selection) { class TestTransactionPacketHandler : public network::tarcap::TransactionPacketHandler { public: TestTransactionPacketHandler(std::shared_ptr peers_state) - : TransactionPacketHandler({}, peers_state, {}, {}, {}, true) {} + : TransactionPacketHandler({}, peers_state, {}, {}, {}) {} std::vector< std::pair, std::pair>>> public_transactionsToSendToPeers(std::vector transactions) { @@ -1188,7 +1216,6 @@ TEST_F(NetworkTest, 
transaction_gossip_selection) { dev::p2p::NodeID node_id3(node_key3.pub()); addr_t node_addr1(node_key1.address()); addr_t node_addr2(node_key2.address()); - addr_t node_addr3(node_key3.address()); auto peers_state = std::make_shared(std::weak_ptr(), FullNodeConfig()); peers_state->addPendingPeer(node_id1, {}); diff --git a/tests/p2p_test.cpp b/tests/p2p_test.cpp index 1f84dbdbfb..253347c7c6 100644 --- a/tests/p2p_test.cpp +++ b/tests/p2p_test.cpp @@ -1,18 +1,15 @@ #include #include +#include #include #include #include #include -#include #include -#include "common/static_init.hpp" +#include "common/init.hpp" #include "logger/logger.hpp" -#include "network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp" -#include "network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp" -#include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "network/tarcap/tarcap_version.hpp" #include "test_util/samples.hpp" #include "test_util/test_util.hpp" @@ -146,7 +143,7 @@ TEST_F(P2PTest, multiple_capabilities) { { test_tarcaps({1}, {1}); } { test_tarcaps({1, 2, 3}, {3, 4, 5}); } - // No common tarcapm version, connection should not be established + // No common tarcap version, connection should not be established { auto nodes = test_tarcaps({1, 2, 3}, {4, 5, 6}, false); // check that connection wasn't established diff --git a/tests/pbft_chain_test.cpp b/tests/pbft_chain_test.cpp index 5769fee427..7adee1ceb9 100644 --- a/tests/pbft_chain_test.cpp +++ b/tests/pbft_chain_test.cpp @@ -5,7 +5,7 @@ #include #include -#include "common/static_init.hpp" +#include "common/init.hpp" #include "logger/logger.hpp" #include "network/network.hpp" #include "pbft/pbft_manager.hpp" @@ -51,11 +51,11 @@ TEST_F(PbftChainTest, pbft_db_test) { level_t level = 1; vdf_sortition::VdfSortition vdf1(vdf_config, vrf_sk, getRlpBytes(level), 1, 100); vdf1.computeVdfSolution(vdf_config, dag_genesis.asBytes(), false); - DagBlock blk1(dag_genesis, 1, {}, {}, {}, vdf1, sk); + auto 
blk1 = std::make_shared(dag_genesis, 1, vec_blk_t{}, vec_trx_t{}, 0, vdf1, sk); PbftPeriod period = 1; addr_t beneficiary(987); - PbftBlock pbft_block(prev_block_hash, blk1.getHash(), kNullBlockHash, kNullBlockHash, period, beneficiary, + PbftBlock pbft_block(prev_block_hash, blk1->getHash(), kNullBlockHash, kNullBlockHash, period, beneficiary, node->getSecretKey(), {}); // put into pbft chain and store into DB diff --git a/tests/pbft_manager_test.cpp b/tests/pbft_manager_test.cpp index f5c455d084..3a6a86c766 100644 --- a/tests/pbft_manager_test.cpp +++ b/tests/pbft_manager_test.cpp @@ -1,6 +1,6 @@ #include -#include "common/static_init.hpp" +#include "common/init.hpp" #include "logger/logger.hpp" #include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" #include "test_util/node_dag_creation_fixture.hpp" @@ -518,7 +518,7 @@ TEST_F(PbftManagerWithDagCreation, dag_generation) { generateAndApplyInitialDag(); EXPECT_HAPPENS({10s, 250ms}, [&](auto &ctx) { - WAIT_EXPECT_EQ(ctx, node->getFinalChain()->get_account(node->getAddress())->nonce, nonce); + WAIT_EXPECT_EQ(ctx, node->getFinalChain()->getAccount(node->getAddress())->nonce, nonce); }); auto nonce_before = nonce; @@ -531,7 +531,7 @@ TEST_F(PbftManagerWithDagCreation, dag_generation) { EXPECT_EQ(nonce, nonce_before + tx_count); EXPECT_HAPPENS({60s, 250ms}, [&](auto &ctx) { - WAIT_EXPECT_EQ(ctx, node->getFinalChain()->get_account(node->getAddress())->nonce, nonce); + WAIT_EXPECT_EQ(ctx, node->getFinalChain()->getAccount(node->getAddress())->nonce, nonce); WAIT_EXPECT_EQ(ctx, node->getDB()->getNumTransactionExecuted(), nonce - 1); }); } @@ -539,6 +539,7 @@ TEST_F(PbftManagerWithDagCreation, dag_generation) { TEST_F(PbftManagerWithDagCreation, limit_dag_block_size) { auto node_cfgs = make_node_cfgs(1, 1, 5, true); node_cfgs.front().genesis.dag.gas_limit = 500000; + node_cfgs.front().propose_dag_gas_limit = 500000; makeNodeFromConfig(node_cfgs); deployContract(); @@ -596,6 +597,8 @@ 
TEST_F(PbftManagerWithDagCreation, limit_pbft_block) { auto node_cfgs = make_node_cfgs(1, 1, 5, true); node_cfgs.front().genesis.dag.gas_limit = 500000; node_cfgs.front().genesis.pbft.gas_limit = 1100000; + node_cfgs.front().propose_dag_gas_limit = 500000; + node_cfgs.front().propose_pbft_gas_limit = 1100000; makeNodeFromConfig(node_cfgs); deployContract(); @@ -606,7 +609,7 @@ TEST_F(PbftManagerWithDagCreation, limit_pbft_block) { EXPECT_HAPPENS({10s, 500ms}, [&](auto &ctx) { WAIT_EXPECT_EQ(ctx, trxs_before, node->getDB()->getNumTransactionExecuted()); }); - auto starting_block_number = node->getFinalChain()->last_block_number(); + auto starting_block_number = node->getFinalChain()->lastBlockNumber(); auto trx_in_block = 5; insertBlocks(generateDagBlocks(20, 5, trx_in_block)); @@ -617,7 +620,7 @@ TEST_F(PbftManagerWithDagCreation, limit_pbft_block) { }); auto max_pbft_block_capacity = node_cfgs.front().genesis.pbft.gas_limit / (trxEstimation() * 5); - for (size_t i = starting_block_number; i < node->getFinalChain()->last_block_number(); ++i) { + for (size_t i = starting_block_number; i < node->getFinalChain()->lastBlockNumber(); ++i) { const auto &blk_hash = node->getDB()->getPeriodBlockHash(i); ASSERT_TRUE(blk_hash != kNullBlockHash); const auto &pbft_block = node->getPbftChain()->getPbftBlockInChain(blk_hash); @@ -631,6 +634,8 @@ TEST_F(PbftManagerWithDagCreation, produce_overweighted_block) { auto node_cfgs = make_node_cfgs(1, 1, 5, true); auto dag_gas_limit = node_cfgs.front().genesis.dag.gas_limit = 500000; node_cfgs.front().genesis.pbft.gas_limit = 1100000; + node_cfgs.front().propose_dag_gas_limit = 500000; + node_cfgs.front().propose_pbft_gas_limit = 1100000; makeNodeFromConfig(node_cfgs); deployContract(); @@ -641,13 +646,13 @@ TEST_F(PbftManagerWithDagCreation, produce_overweighted_block) { EXPECT_HAPPENS({10s, 500ms}, [&](auto &ctx) { WAIT_EXPECT_EQ(ctx, trx_count, node->getDB()->getNumTransactionExecuted()); }); - auto starting_block_number = 
node->getFinalChain()->last_block_number(); + auto starting_block_number = node->getFinalChain()->lastBlockNumber(); const auto trx_in_block = dag_gas_limit / trxEstimation() + 2; insertBlocks(generateDagBlocks(1, 5, trx_in_block)); // We need to move one block forward when we will start applying those generated DAGs and transactions EXPECT_HAPPENS({10s, 100ms}, [&](auto &ctx) { - WAIT_EXPECT_EQ(ctx, node->getFinalChain()->last_block_number(), starting_block_number + 1); + WAIT_EXPECT_EQ(ctx, node->getFinalChain()->lastBlockNumber(), starting_block_number + 1); }); // check that new created transaction wasn't executed in that previous block ASSERT_EQ(trx_count, node->getDB()->getNumTransactionExecuted()); @@ -658,15 +663,14 @@ TEST_F(PbftManagerWithDagCreation, produce_overweighted_block) { EXPECT_HAPPENS({10s, 100ms}, [&](auto &ctx) { // all transactions should be included in 2 blocks WAIT_EXPECT_EQ(ctx, node->getDB()->getNumTransactionExecuted(), trx_count); - WAIT_EXPECT_EQ(ctx, node->getFinalChain()->last_block_number(), starting_block_number + 2); + WAIT_EXPECT_EQ(ctx, node->getFinalChain()->lastBlockNumber(), starting_block_number + 2); }); // verify that last block is overweighted, but it is in chain - const auto period = node->getFinalChain()->last_block_number(); - auto period_raw = node->getDB()->getPeriodDataRaw(period); - ASSERT_FALSE(period_raw.empty()); - PeriodData period_data(period_raw); - EXPECT_FALSE(node->getPbftManager()->checkBlockWeight(period_data.dag_blocks)); + const auto period = node->getFinalChain()->lastBlockNumber(); + auto period_data = node->getDB()->getPeriodData(period); + ASSERT_TRUE(period_data.has_value()); + EXPECT_FALSE(node->getPbftManager()->checkBlockWeight(period_data->dag_blocks, period)); } TEST_F(PbftManagerWithDagCreation, proposed_blocks) { @@ -674,14 +678,19 @@ TEST_F(PbftManagerWithDagCreation, proposed_blocks) { ProposedBlocks proposed_blocks(db); std::map> blocks; - const uint32_t block_count = 100; // Create 
blocks - for (uint32_t i = 1; i <= block_count; i++) { - std::vector reward_votes_hashes; - auto block = std::make_shared(blk_hash_t(1), kNullBlockHash, kNullBlockHash, kNullBlockHash, 2, addr_t(), - dev::KeyPair::create().secret(), std::move(reward_votes_hashes)); - blocks.insert({block->getBlockHash(), block}); + const auto max_period = 3; + const auto blocks_per_period = 40; + for (PbftPeriod period = 1; period <= max_period; period++) { + for (uint32_t i = 1; i <= blocks_per_period; i++) { + std::vector reward_votes_hashes; + auto block = + std::make_shared(blk_hash_t(i), kNullBlockHash, kNullBlockHash, kNullBlockHash, period, addr_t(), + dev::KeyPair::create().secret(), std::move(reward_votes_hashes)); + blocks.insert({block->getBlockHash(), block}); + } } + const uint32_t block_count = blocks.size(); auto now = std::chrono::steady_clock::now(); for (auto b : blocks) { proposed_blocks.pushProposedPbftBlock(b.second); @@ -695,7 +704,7 @@ TEST_F(PbftManagerWithDagCreation, proposed_blocks) { EXPECT_TRUE(blocks.find(b->getBlockHash()) != blocks.end()); } now = std::chrono::steady_clock::now(); - proposed_blocks.cleanupProposedPbftBlocksByPeriod(3); + proposed_blocks.cleanupProposedPbftBlocksByPeriod(4); std::cout << "Time to erase " << block_count << " blocks: " << duration_cast(std::chrono::steady_clock::now() - now).count() << " microseconds" << std::endl; @@ -721,21 +730,16 @@ TEST_F(PbftManagerWithDagCreation, state_root_hash) { } EXPECT_HAPPENS({5s, 500ms}, [&](auto &ctx) { - WAIT_EXPECT_EQ(ctx, node->getFinalChain()->get_account(node->getAddress())->nonce, nonce); + WAIT_EXPECT_EQ(ctx, node->getFinalChain()->getAccount(node->getAddress())->nonce, nonce); WAIT_EXPECT_EQ(ctx, node->getDB()->getNumTransactionExecuted(), nonce - 1); }); - const auto &state_root_delay = node_cfgs.front().genesis.state.dpos.delegation_delay; const auto &head_hash = node->getPbftChain()->getLastPbftBlockHash(); auto pbft_block = 
node->getPbftChain()->getPbftBlockInChain(head_hash); // Check that all produced blocks have correct state_root_hashes while (pbft_block.getPeriod() != 1) { auto period = pbft_block.getPeriod(); - h256 state_root; - if (period > state_root_delay) { - state_root = node->getFinalChain()->block_header(period - state_root_delay)->state_root; - } - EXPECT_EQ(pbft_block.getPrevStateRoot(), state_root); + EXPECT_EQ(pbft_block.getFinalChainHash(), node->getFinalChain()->finalChainHash(period)); pbft_block = node->getPbftChain()->getPbftBlockInChain(pbft_block.getPrevBlockHash()); } diff --git a/tests/pillar_chain_test.cpp b/tests/pillar_chain_test.cpp index 34d2d03c6d..0bf310821d 100644 --- a/tests/pillar_chain_test.cpp +++ b/tests/pillar_chain_test.cpp @@ -1,8 +1,7 @@ #include -#include - -#include "common/static_init.hpp" +#include "common/encoding_solidity.hpp" +#include "common/init.hpp" #include "logger/logger.hpp" #include "pbft/pbft_manager.hpp" #include "pillar_chain/pillar_chain_manager.hpp" @@ -22,16 +21,12 @@ TEST_F(PillarChainTest, pillar_chain_db) { blk_hash_t previous_pillar_block_hash(789); std::vector votes_count_changes; - const auto vote_count_change1 = votes_count_changes.emplace_back(addr_t(1), 1); - const auto vote_count_change2 = votes_count_changes.emplace_back(addr_t(2), 2); const auto pillar_block = std::make_shared( pillar_block_period, state_root, previous_pillar_block_hash, h256{}, 0, std::move(votes_count_changes)); // Pillar block vote counts std::vector vote_counts; - const auto stake1 = votes_count_changes.emplace_back(addr_t(123), 123); - const auto stake2 = votes_count_changes.emplace_back(addr_t(456), 456); // Current pillar block data - block + vote counts pillar_chain::CurrentPillarBlockDataDb current_pillar_block_data{pillar_block, vote_counts}; @@ -100,7 +95,7 @@ TEST_F(PillarChainTest, pillar_blocks_create) { } TEST_F(PillarChainTest, votes_count_changes) { - const auto validators_count = 3; + const auto validators_count = 5; auto 
node_cfgs = make_node_cfgs(validators_count, validators_count, 10); for (auto& node_cfg : node_cfgs) { @@ -108,18 +103,60 @@ TEST_F(PillarChainTest, votes_count_changes) { node_cfg.genesis.state.hardforks.ficus_hf.block_num = 0; node_cfg.genesis.state.hardforks.ficus_hf.pillar_blocks_interval = 4; } + auto nodes = launch_nodes(node_cfgs); + + auto wait_for_next_pillar_block = [&](size_t txs_count) -> PbftPeriod { + EXPECT_HAPPENS({20s, 100ms}, [&](auto& ctx) { + for (auto& node : nodes) { + if (ctx.fail_if(node->getDB()->getNumTransactionExecuted() != txs_count)) { + return; + } + } + }); + auto chain_size = nodes[0]->getPbftChain()->getPbftChainSize(); + + // Wait until new pillar block with changed validators vote_counts is created + auto new_pillar_block_period = chain_size - + chain_size % node_cfgs[0].genesis.state.hardforks.ficus_hf.pillar_blocks_interval + + node_cfgs[0].genesis.state.hardforks.ficus_hf.pillar_blocks_interval; + EXPECT_HAPPENS({20s, 250ms}, [&](auto& ctx) { + for (const auto& node : nodes) { + if (ctx.fail_if(node->getPbftChain()->getPbftChainSize() < new_pillar_block_period + 1)) { + return; + } + } + }); - std::vector validators_vote_counts; - validators_vote_counts.reserve(node_cfgs.size()); + return new_pillar_block_period; + }; + + auto checkPillarBlockData = [&](size_t pillar_block_period, + std::unordered_map expected_validators_vote_counts_changes) { + // Check if vote_counts changes in new pillar block changed according to new delegations + for (auto& node : nodes) { + // Check if right amount of pillar blocks were created + const auto new_pillar_block = node->getDB()->getPillarBlock(pillar_block_period); + ASSERT_TRUE(new_pillar_block); + ASSERT_EQ(new_pillar_block->getPeriod(), pillar_block_period); + ASSERT_EQ(new_pillar_block->getValidatorsVoteCountsChanges().size(), + expected_validators_vote_counts_changes.size()); + for (const auto& vote_count_change : new_pillar_block->getValidatorsVoteCountsChanges()) { + 
EXPECT_TRUE(expected_validators_vote_counts_changes.contains(vote_count_change.addr_)); + ASSERT_EQ(vote_count_change.vote_count_change_, + expected_validators_vote_counts_changes[vote_count_change.addr_]); + } + } + }; + + // Initial stakes of all validators + std::unordered_map expected_validators_vote_counts_changes; for (const auto& validator : node_cfgs[0].genesis.state.dpos.initial_validators) { - auto& vote_count = validators_vote_counts.emplace_back(0); + auto& vote_count = expected_validators_vote_counts_changes[validator.address]; for (const auto& delegation : validator.delegations) { vote_count += delegation.second / node_cfgs[0].genesis.state.dpos.vote_eligibility_balance_step; } } - auto nodes = launch_nodes(node_cfgs); - // Wait until nodes create first pillar block const auto first_pillar_block_period = node_cfgs[0].genesis.state.hardforks.ficus_hf.firstPillarBlockPeriod(); ASSERT_HAPPENS({20s, 250ms}, [&](auto& ctx) { @@ -128,61 +165,52 @@ TEST_F(PillarChainTest, votes_count_changes) { } }); - // Check if vote_counts changes in first pillar block == initial validators vote_counts - for (auto& node : nodes) { - // Check if right amount of pillar blocks were created - const auto first_pillar_block = node->getDB()->getPillarBlock(first_pillar_block_period); - ASSERT_TRUE(first_pillar_block); - - ASSERT_EQ(first_pillar_block->getPeriod(), first_pillar_block_period); - ASSERT_EQ(first_pillar_block->getValidatorsVoteCountsChanges().size(), validators_count); - size_t idx = 0; - for (const auto& vote_count_change : first_pillar_block->getValidatorsVoteCountsChanges()) { - ASSERT_EQ(vote_count_change.vote_count_change_, validators_vote_counts[idx]); - idx++; - } - } + checkPillarBlockData(first_pillar_block_period, expected_validators_vote_counts_changes); - // Change validators delegation - const auto delegation_value = 2 * node_cfgs[0].genesis.state.dpos.eligibility_balance_threshold; + // Delegate to validators + 
expected_validators_vote_counts_changes.clear(); + size_t txs_count = 0; for (size_t i = 0; i < validators_count; i++) { + const auto delegation_value = (i + 1) * node_cfgs[0].genesis.state.dpos.eligibility_balance_threshold; + expected_validators_vote_counts_changes[toAddress(node_cfgs[i].node_secret)] = i + 1; const auto trx = make_delegate_tx(node_cfgs[i], delegation_value, 1, 1000); nodes[0]->getTransactionManager()->insertTransaction(trx); + txs_count++; } - EXPECT_HAPPENS({20s, 100ms}, [&](auto& ctx) { - for (auto& node : nodes) { - if (ctx.fail_if(node->getDB()->getNumTransactionExecuted() != validators_count)) { - return; - } - } - }); - const auto chain_size = nodes[0]->getPbftChain()->getPbftChainSize(); + auto new_pillar_block_period = wait_for_next_pillar_block(txs_count); + checkPillarBlockData(new_pillar_block_period, expected_validators_vote_counts_changes); - // Wait until new pillar block with changed validators vote_counts is created - const auto new_pillar_block_period = - chain_size - chain_size % node_cfgs[0].genesis.state.hardforks.ficus_hf.pillar_blocks_interval + - node_cfgs[0].genesis.state.hardforks.ficus_hf.pillar_blocks_interval; - ASSERT_HAPPENS({20s, 250ms}, [&](auto& ctx) { - for (const auto& node : nodes) { - WAIT_EXPECT_GE(ctx, node->getPbftChain()->getPbftChainSize(), new_pillar_block_period + 1) - } - }); + // Undelegate from validators + expected_validators_vote_counts_changes.clear(); + for (size_t i = 0; i < validators_count - 1; i++) { + const auto undelegation_value = (i + 1) * node_cfgs[0].genesis.state.dpos.eligibility_balance_threshold; + expected_validators_vote_counts_changes[toAddress(node_cfgs[i].node_secret)] = dev::s256(i + 1) * -1; + const auto trx = make_undelegate_tx(node_cfgs[i], undelegation_value, 2, 1000); + nodes[0]->getTransactionManager()->insertTransaction(trx); + txs_count++; + } - // Check if vote_counts changes in new pillar block changed according to new delegations - for (auto& node : nodes) { - // 
Check if right amount of pillar blocks were created - const auto new_pillar_block = node->getDB()->getPillarBlock(new_pillar_block_period); - ASSERT_TRUE(new_pillar_block); - ASSERT_EQ(new_pillar_block->getPeriod(), new_pillar_block_period); - ASSERT_EQ(new_pillar_block->getValidatorsVoteCountsChanges().size(), validators_count); - size_t idx = 0; - for (const auto& vote_count_change : new_pillar_block->getValidatorsVoteCountsChanges()) { - ASSERT_EQ(vote_count_change.vote_count_change_, - delegation_value / node_cfgs[0].genesis.state.dpos.eligibility_balance_threshold); - idx++; - } + new_pillar_block_period = wait_for_next_pillar_block(txs_count); + checkPillarBlockData(new_pillar_block_period, expected_validators_vote_counts_changes); + + // Redelegate + const auto redelegate_to_addr = toAddress(node_cfgs[node_cfgs.size() - 1].node_secret); + expected_validators_vote_counts_changes.clear(); + expected_validators_vote_counts_changes[redelegate_to_addr] = 0; + for (size_t i = 0; i < validators_count - 3; i++) { + const auto node_addr = toAddress(node_cfgs[i].node_secret); + const auto node_vote_count = nodes[0]->getFinalChain()->dposEligibleVoteCount(new_pillar_block_period, node_addr); + const auto redelegation_value = node_vote_count * node_cfgs[0].genesis.state.dpos.eligibility_balance_threshold; + expected_validators_vote_counts_changes[node_addr] = dev::s256(node_vote_count) * -1; + expected_validators_vote_counts_changes[redelegate_to_addr] += dev::s256(node_vote_count); + const auto trx = make_redelegate_tx(node_cfgs[i], redelegation_value, redelegate_to_addr, 3, 1000); + nodes[0]->getTransactionManager()->insertTransaction(trx); + txs_count++; } + + new_pillar_block_period = wait_for_next_pillar_block(txs_count); + checkPillarBlockData(new_pillar_block_period, expected_validators_vote_counts_changes); } TEST_F(PillarChainTest, pillar_chain_syncing) { @@ -200,7 +228,7 @@ TEST_F(PillarChainTest, pillar_chain_syncing) { // Wait until node1 creates at least 3 
pillar blocks const auto pillar_blocks_count = 3; ASSERT_HAPPENS({20s, 250ms}, [&](auto& ctx) { - WAIT_EXPECT_EQ(ctx, node1->getFinalChain()->last_block_number(), + WAIT_EXPECT_EQ(ctx, node1->getFinalChain()->lastBlockNumber(), pillar_blocks_count * node_cfgs[0].genesis.state.hardforks.ficus_hf.pillar_blocks_interval) }); node1->getPbftManager()->stop(); @@ -209,7 +237,7 @@ TEST_F(PillarChainTest, pillar_chain_syncing) { auto node2 = launch_nodes({node_cfgs[1]})[0]; // Wait until node2 syncs pbft chain with node1 ASSERT_HAPPENS({20s, 200ms}, [&](auto& ctx) { - WAIT_EXPECT_EQ(ctx, node2->getFinalChain()->last_block_number(), node1->getFinalChain()->last_block_number()) + WAIT_EXPECT_EQ(ctx, node2->getFinalChain()->lastBlockNumber(), node1->getFinalChain()->lastBlockNumber()) }); node2->getPbftManager()->stop(); @@ -246,7 +274,7 @@ TEST_F(PillarChainTest, pillar_chain_syncing) { ASSERT_EQ(pillar_vote->getPeriod() - 1, node2_current_pillar_block->getPeriod()); ASSERT_EQ(pillar_vote->getBlockHash(), node2_current_pillar_block->getHash()); votes_count += - node2->getFinalChain()->dpos_eligible_vote_count(pillar_vote->getPeriod() - 1, pillar_vote->getVoterAddr()); + node2->getFinalChain()->dposEligibleVoteCount(pillar_vote->getPeriod() - 1, pillar_vote->getVoterAddr()); } ASSERT_GE(votes_count, threshold); } @@ -368,12 +396,8 @@ TEST_F(PillarChainTest, pillar_block_solidity_rlp_encoding) { blk_hash_t previous_pillar_block_hash(789); std::vector votes_count_changes; - const auto vote_count_change1 = votes_count_changes.emplace_back(addr_t(1), 1); - const auto vote_count_change2 = votes_count_changes.emplace_back(addr_t(2), 2); - - auto vcc = votes_count_changes; const auto pillar_block = pillar_chain::PillarBlock(pillar_block_period, state_root, previous_pillar_block_hash, - bridge_root, epoch, std::move(vcc)); + bridge_root, epoch, std::move(votes_count_changes)); auto validateDecodedPillarBlock = [&](const pillar_chain::PillarBlock& pillar_block) { 
ASSERT_EQ(pillar_block.getPeriod(), pillar_block_period); @@ -524,7 +548,7 @@ TEST_F(PillarChainTest, finalize_root_in_pillar_block) { ASSERT_EQ(trx->getSender(), kTaraxaSystemAccount); ASSERT_EQ(trx->getReceiver(), node_cfgs[0].genesis.state.hardforks.ficus_hf.bridge_contract_address); // check that correct hash is returned - auto hashes = node->getFinalChain()->transaction_hashes(period - 1); + auto hashes = node->getFinalChain()->transactionHashes(period - 1); ASSERT_EQ(hashes->size(), 1); ASSERT_EQ(hashes->at(0), trx->getHash()); // check that location by hash exists and is_system set to true @@ -540,7 +564,7 @@ TEST_F(PillarChainTest, finalize_root_in_pillar_block) { ASSERT_EQ(trx_by_hash->getReceiver(), node_cfgs[0].genesis.state.hardforks.ficus_hf.bridge_contract_address); ASSERT_EQ(trx_by_hash->getSender(), kTaraxaSystemAccount); // check that receipt exists - const auto& trx_receipt = node->getFinalChain()->transaction_receipt(trx->getHash()); + const auto& trx_receipt = node->getFinalChain()->transactionReceipt(trx->getHash()); ASSERT_TRUE(trx_receipt.has_value()); ASSERT_EQ(trx_receipt->status_code, 1); } diff --git a/tests/rewards_stats_test.cpp b/tests/rewards_stats_test.cpp index 6fec7173b2..d96245719b 100644 --- a/tests/rewards_stats_test.cpp +++ b/tests/rewards_stats_test.cpp @@ -19,11 +19,18 @@ struct RewardsStatsTest : NodesTest {}; class TestableRewardsStats : public rewards::Stats { public: - TestableRewardsStats(const HardforksConfig::RewardsDistributionMap& rdm, std::shared_ptr db) - : rewards::Stats( - 100, - HardforksConfig{0, {}, rdm, MagnoliaHardfork{0, 0}, 0, 0, AspenHardfork{0, 0}, FicusHardforkConfig{0, 0}}, - db, [](auto) { return 100; }) {} + TestableRewardsStats(const HardforksConfig::RewardsDistributionMap& rdm, std::shared_ptr db) + : rewards::Stats(100, + HardforksConfig{0, + {}, + rdm, + MagnoliaHardfork{0, 0}, + 0, + 0, + AspenHardfork{0, 0}, + FicusHardforkConfig{0, 0, {}}, + CornusHardforkConfig{0, 0, 0, 0}}, + db, [](auto) { 
return 100; }) {} auto getStats() { return blocks_stats_; } }; @@ -206,7 +213,8 @@ TEST_F(RewardsStatsTest, feeRewards) { auto trx = std::make_shared(nonce++, 0, 1, trx_gas_fee, dev::fromHex(samples::greeter_contract_code), pbft_proposer.secret()); - DagBlock dag_blk({}, {}, {}, {trx->getHash()}, {}, {}, dag_proposer.secret()); + auto dag_blk = std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, vec_trx_t{trx->getHash()}, 0, + VdfSortition{}, dag_proposer.secret()); db->saveDagBlock(dag_blk); std::vector reward_votes_hashes; auto pbft_block = @@ -241,11 +249,27 @@ TEST_F(RewardsStatsTest, dagBlockRewards) { // Create two reward stats to test before and after aspen hardfork part 1 rewards::Stats pre_aspen_reward_stats(100, - HardforksConfig{0, {}, {}, MagnoliaHardfork{0, 0}, 0, 0, AspenHardfork{6, 999}}, + HardforksConfig{0, + {}, + {}, + MagnoliaHardfork{0, 0}, + 0, + 0, + AspenHardfork{6, 999}, + FicusHardforkConfig{0, 0, {}}, + CornusHardforkConfig{0, 0, 0, 0}}, db, [](auto) { return 100; }); - rewards::Stats post_aspen_reward_stats( - 100, HardforksConfig{0, {}, {}, MagnoliaHardfork{0, 0}, 0, 0, AspenHardfork{4, 999}}, db, - [](auto) { return 100; }); + rewards::Stats post_aspen_reward_stats(100, + HardforksConfig{0, + {}, + {}, + MagnoliaHardfork{0, 0}, + 0, + 0, + AspenHardfork{4, 999}, + FicusHardforkConfig{0, 0, {}}, + CornusHardforkConfig{0, 0, 0, 0}}, + db, [](auto) { return 100; }); // Create pbft block with 5 dag blocks auto dag_key1 = dev::KeyPair::create(); @@ -263,35 +287,40 @@ TEST_F(RewardsStatsTest, dagBlockRewards) { vdf_sortition::VdfSortition vdf1(sortition_params, vrfs, vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(1)), 1, 1); - DagBlock dag_blk1({}, {}, {}, {trxs[0]->getHash()}, 0, vdf1, dag_key1.secret()); + auto dag_blk1 = std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, vec_trx_t{trxs[0]->getHash()}, 0, + vdf1, dag_key1.secret()); block.dag_blocks.push_back(dag_blk1); vdf_sortition::VdfSortition vdf2(sortition_params, 
vrfs, vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(2)), 1, 1); - DagBlock dag_blk2({}, {}, {}, {trxs[1]->getHash()}, 0, vdf2, dag_key2.secret()); + auto dag_blk2 = std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, vec_trx_t{trxs[1]->getHash()}, 0, + vdf2, dag_key2.secret()); block.dag_blocks.push_back(dag_blk2); vdf_sortition::VdfSortition vdf3(sortition_params, vrfs, vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(3)), 1, 1); - DagBlock dag_blk3({}, {}, {}, {trxs[0]->getHash()}, 0, vdf3, dag_key3.secret()); + auto dag_blk3 = std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, vec_trx_t{trxs[0]->getHash()}, 0, + vdf3, dag_key3.secret()); block.dag_blocks.push_back(dag_blk3); vdf_sortition::VdfSortition vdf4(sortition_params, vrfs, vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(4)), 1, 1); - DagBlock dag_blk4({}, {}, {}, {trxs[1]->getHash()}, 0, vdf4, dag_key4.secret()); + auto dag_blk4 = std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, vec_trx_t{trxs[1]->getHash()}, 0, + vdf4, dag_key4.secret()); block.dag_blocks.push_back(dag_blk4); vdf_sortition::VdfSortition vdf5(sortition_params, vrfs, vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(5)), 1, 1); - DagBlock dag_blk5({}, {}, {}, {trxs[2]->getHash()}, 0, vdf5, dag_key5.secret()); + auto dag_blk5 = std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, vec_trx_t{trxs[2]->getHash()}, 0, + vdf5, dag_key5.secret()); block.dag_blocks.push_back(dag_blk5); block.transactions = trxs; - ASSERT_EQ(dag_blk1.getDifficulty(), 17); - ASSERT_EQ(dag_blk2.getDifficulty(), 17); - ASSERT_EQ(dag_blk3.getDifficulty(), 16); - ASSERT_EQ(dag_blk4.getDifficulty(), 17); - ASSERT_EQ(dag_blk5.getDifficulty(), 16); + ASSERT_EQ(dag_blk1->getDifficulty(), 17); + ASSERT_EQ(dag_blk2->getDifficulty(), 17); + ASSERT_EQ(dag_blk3->getDifficulty(), 16); + ASSERT_EQ(dag_blk4->getDifficulty(), 17); + ASSERT_EQ(dag_blk5->getDifficulty(), 16); std::vector gas_used{10, 20, 30}; diff --git 
a/tests/rpc_test.cpp b/tests/rpc_test.cpp index f6b6c90dbc..eab52aaad1 100644 --- a/tests/rpc_test.cpp +++ b/tests/rpc_test.cpp @@ -1,10 +1,8 @@ #include -#include #include #include #include "network/rpc/eth/Eth.h" -#include "test_util/gtest.hpp" #include "test_util/samples.hpp" namespace taraxa::core_tests { @@ -32,9 +30,9 @@ TEST_F(RPCTest, eth_estimateGas) { { Json::Value trx(Json::objectValue); trx["data"] = samples::greeter_contract_code; - check_estimation_is_in_range(trx, "0x5ccc5"); + check_estimation_is_in_range(trx, "0x5dcc5"); trx["from"] = from; - check_estimation_is_in_range(trx, "0x5ccc5"); + check_estimation_is_in_range(trx, "0x5dcc5"); } // Contract creation with value @@ -42,7 +40,7 @@ TEST_F(RPCTest, eth_estimateGas) { Json::Value trx(Json::objectValue); trx["value"] = 1; trx["data"] = samples::greeter_contract_code; - check_estimation_is_in_range(trx, "0x5ccc5"); + check_estimation_is_in_range(trx, "0x5dcc5"); } // Simple transfer estimations with author + without author @@ -76,8 +74,8 @@ TEST_F(RPCTest, eth_call) { eth_rpc_params.final_chain = final_chain; auto eth_json_rpc = net::rpc::eth::NewEth(std::move(eth_rpc_params)); - const auto last_block_num = final_chain->last_block_number(); - const u256 total_eligible = final_chain->dpos_eligible_total_vote_count(last_block_num); + const auto last_block_num = final_chain->lastBlockNumber(); + const u256 total_eligible = final_chain->dposEligibleTotalVoteCount(last_block_num); const auto total_eligible_str = dev::toHexPrefixed(dev::toBigEndian(total_eligible)); const auto empty_address = dev::KeyPair::create().address().toString(); @@ -227,7 +225,7 @@ TEST_F(RPCTest, eth_getBlock) { eth_rpc_params.final_chain = nodes.front()->getFinalChain(); auto eth_json_rpc = net::rpc::eth::NewEth(std::move(eth_rpc_params)); - wait({10s, 500ms}, [&](auto& ctx) { WAIT_EXPECT_EQ(ctx, 5, nodes[0]->getFinalChain()->last_block_number()); }); + wait({10s, 500ms}, [&](auto& ctx) { WAIT_EXPECT_EQ(ctx, 5, 
nodes[0]->getFinalChain()->lastBlockNumber()); }); auto block = eth_json_rpc->eth_getBlockByNumber("0x4", false); EXPECT_EQ(4, dev::jsToU256(block["number"].asString())); @@ -250,7 +248,7 @@ TEST_F(RPCTest, eip_1898) { EXPECT_EQ(eth_json_rpc->eth_getBalance(from, "0x0"), eth_json_rpc->eth_getBalance(from, zero_block)); Json::Value genesis_block(Json::objectValue); - genesis_block["blockHash"] = dev::toJS(*nodes.front()->getFinalChain()->block_hash(0)); + genesis_block["blockHash"] = dev::toJS(*nodes.front()->getFinalChain()->blockHash(0)); EXPECT_EQ(eth_json_rpc->eth_getBalance(from, "0x0"), eth_json_rpc->eth_getBalance(from, genesis_block)); } diff --git a/tests/sortition_test.cpp b/tests/sortition_test.cpp index d4c79419de..3bc8023534 100644 --- a/tests/sortition_test.cpp +++ b/tests/sortition_test.cpp @@ -43,7 +43,7 @@ PeriodData createBlock(PbftPeriod period, uint16_t efficiency, size_t dag_blocks for (size_t i = 0; i < dag_blocks_count; ++i) { vec_trx_t trxs{trx_hashes.begin() + i * trx_per_block, trx_hashes.begin() + (i + 1) * trx_per_block}; - b.dag_blocks.push_back({{}, {}, {}, trxs, {}}); + b.dag_blocks.push_back(std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, trxs, secret_t{})); }; size_t issued_overlap_count = 0; @@ -51,7 +51,7 @@ PeriodData createBlock(PbftPeriod period, uint16_t efficiency, size_t dag_blocks size_t overlap = std::min(kTrxCount - effective_transactions - issued_overlap_count, trx_hashes.size()); issued_overlap_count += overlap; vec_trx_t trxs{trx_hashes.begin(), trx_hashes.begin() + overlap}; - b.dag_blocks.push_back({{}, {}, {}, trxs, {}}); + b.dag_blocks.push_back(std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, trxs, secret_t{})); } return b; } diff --git a/tests/state_api_test.cpp b/tests/state_api_test.cpp index 723ac88150..56db16ced7 100644 --- a/tests/state_api_test.cpp +++ b/tests/state_api_test.cpp @@ -4,10 +4,10 @@ #include #include -#include #include #include "common/encoding_rlp.hpp" +#include 
"slashing_manager/slashing_manager.hpp" #include "test_util/test_util.hpp" namespace taraxa::state_api { @@ -80,12 +80,12 @@ TEST_F(StateAPITest, DISABLED_dpos_integration) { // exp_q_acc_res[addr].is_eligible = true; // } // string meta = "at block " + to_string(curr_blk); - // EXPECT_EQ(addr_1_bal_expected, SUT.get_account(curr_blk, make_addr(1))->balance) << meta; + // EXPECT_EQ(addr_1_bal_expected, SUT.getAccount(curr_blk, make_addr(1))->balance) << meta; // for (auto const& addr : expected_eligible_set) { - // EXPECT_TRUE(SUT.dpos_is_eligible(curr_blk, addr)) << meta; - // EXPECT_EQ(SUT.dpos_eligible_vote_count(curr_blk, addr), 1) << meta; + // EXPECT_TRUE(SUT.dposIsEligible(curr_blk, addr)) << meta; + // EXPECT_EQ(SUT.dposEligibleVoteCount(curr_blk, addr), 1) << meta; // } - // EXPECT_EQ(SUT.dpos_eligible_total_vote_count(curr_blk), expected_eligible_set.size()) << meta; + // EXPECT_EQ(SUT.dposEligibleTotalVoteCount(curr_blk), expected_eligible_set.size()) << meta; // // auto q_res = SUT.dpos_query(curr_blk, q); // EXPECT_EQ(q_res.eligible_count, expected_eligible_set.size()) << meta; // for (auto& [addr, res_exp] : exp_q_acc_res) { @@ -222,23 +222,21 @@ TEST_F(StateAPITest, slashing) { auto nodes = launch_nodes(node_cfgs); auto node = nodes.begin()->get(); auto node_cfg = node_cfgs.begin(); - ASSERT_EQ(true, - node->getFinalChain()->dpos_is_eligible(node->getFinalChain()->last_block_number(), node->getAddress())); + ASSERT_EQ(true, node->getFinalChain()->dposIsEligible(node->getFinalChain()->lastBlockNumber(), node->getAddress())); // Generate 2 cert votes for 2 different blocks auto vote_a = node->getVoteManager()->generateVote(blk_hash_t{1}, PbftVoteTypes::cert_vote, 1, 1, 3); auto vote_b = node->getVoteManager()->generateVote(blk_hash_t{2}, PbftVoteTypes::cert_vote, 1, 1, 3); // Commit double voting proof - auto slashing_manager = std::make_shared(node->getFinalChain(), node->getTransactionManager(), - node->getGasPricer(), *node_cfg, 
node->getSecretKey()); + auto slashing_manager = std::make_shared(*node_cfg, node->getFinalChain(), + node->getTransactionManager(), node->getGasPricer()); ASSERT_EQ(true, slashing_manager->submitDoubleVotingProof(vote_a, vote_b)); // After few blocks malicious validator should be jailed ASSERT_HAPPENS({10s, 100ms}, [&](auto& ctx) { - WAIT_EXPECT_EQ( - ctx, false, - node->getFinalChain()->dpos_is_eligible(node->getFinalChain()->last_block_number(), node->getAddress())) + WAIT_EXPECT_EQ(ctx, false, + node->getFinalChain()->dposIsEligible(node->getFinalChain()->lastBlockNumber(), node->getAddress())) }); // Option 2: more sophisticated and longer test @@ -246,7 +244,7 @@ TEST_F(StateAPITest, slashing) { // ASSERT_HAPPENS({5s, 100ms}, [&](auto& ctx) { // WAIT_EXPECT_EQ( // ctx, true, - // node->getFinalChain()->dpos_is_eligible(node->getFinalChain()->last_block_number(), node->getAddress())) + // node->getFinalChain()->dposIsEligible(node->getFinalChain()->lastBlockNumber(), node->getAddress())) // }); } diff --git a/tests/tarcap_threadpool_test.cpp b/tests/tarcap_threadpool_test.cpp index 4bca7fae80..1801631288 100644 --- a/tests/tarcap_threadpool_test.cpp +++ b/tests/tarcap_threadpool_test.cpp @@ -1,11 +1,12 @@ #include -#include - #include "config/config.hpp" +#include "config/version.hpp" #include "dag/dag_block.hpp" #include "logger/logger.hpp" #include "network/tarcap/packets_handler.hpp" +#include "network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp" +#include "network/tarcap/shared_states/peers_state.hpp" #include "network/threadpool/tarcap_thread_pool.hpp" #include "test_util/test_util.hpp" @@ -83,14 +84,19 @@ struct HandlersInitData { dev::p2p::NodeID copySender() { return sender_node_id; } }; -class DummyPacketHandler : public tarcap::PacketHandler { +struct DummyPacket { + std::string type_str; + threadpool::PacketData::PacketId packet_id; +}; + +class DummyPacketHandler : public network::tarcap::BasePacketHandler { public: 
DummyPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, uint32_t processing_delay_ms) - : PacketHandler(init_data.conf, init_data.peers_state, init_data.packets_stats, init_data.own_node_addr, - log_channel_name), - processing_delay_ms_(processing_delay_ms), - packets_proc_info_(init_data.packets_processing_info) {} + : processing_delay_ms_(processing_delay_ms), packets_proc_info_(init_data.packets_processing_info) { + const auto node_addr = init_data.own_node_addr; + LOG_OBJECTS_CREATE(log_channel_name); + } virtual ~DummyPacketHandler() = default; DummyPacketHandler(const DummyPacketHandler&) = default; @@ -98,24 +104,32 @@ class DummyPacketHandler : public tarcap::PacketHandler { DummyPacketHandler& operator=(const DummyPacketHandler&) = delete; DummyPacketHandler& operator=(DummyPacketHandler&&) = delete; - private: - void validatePacketRlpFormat([[maybe_unused]] const threadpool::PacketData& packet_data) const override {} + void processPacket(const threadpool::PacketData& packet_data) override { + // Decode packet rlp into packet object + DummyPacket packet{packet_data.type_str_, packet_data.id_}; - void process(const threadpool::PacketData& packet_data, - [[maybe_unused]] const std::shared_ptr& peer) override { + // Main processing function + process(std::move(packet), {}); + } + + private: + void process(DummyPacket&& packet, [[maybe_unused]] const std::shared_ptr& peer) { // Note do not use LOG() before saving start & finish time as it is internally synchronized and can // cause delays, which result in tests fails auto start_time = std::chrono::steady_clock::now(); std::this_thread::sleep_for(std::chrono::milliseconds(processing_delay_ms_)); auto finish_time = std::chrono::steady_clock::now(); - LOG(log_dg_) << "Processing packet: " << packet_data.type_str_ << ", id(" << packet_data.id_ << ") finished. " + LOG(log_dg_) << "Processing packet: " << packet.type_str << ", id(" << packet.packet_id << ") finished. 
" << "Start time: " << start_time.time_since_epoch().count() << ", finish time: " << finish_time.time_since_epoch().count(); - packets_proc_info_->addPacketProcessingTimes(packet_data.id_, {start_time, finish_time}); + packets_proc_info_->addPacketProcessingTimes(packet.packet_id, {start_time, finish_time}); } + // Declare logger instances + LOG_OBJECTS_DEFINE + uint32_t processing_delay_ms_{0}; std::shared_ptr packets_proc_info_; }; @@ -127,7 +141,7 @@ class DummyTransactionPacketHandler : public DummyPacketHandler { : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::TransactionPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kTransactionPacket; }; class DummyDagBlockPacketHandler : public DummyPacketHandler { @@ -137,7 +151,7 @@ class DummyDagBlockPacketHandler : public DummyPacketHandler { : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::DagBlockPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagBlockPacket; }; class DummyStatusPacketHandler : public DummyPacketHandler { @@ -147,7 +161,7 @@ class DummyStatusPacketHandler : public DummyPacketHandler { : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::StatusPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kStatusPacket; }; class DummyVotePacketHandler : public DummyPacketHandler { @@ -157,7 +171,7 @@ class DummyVotePacketHandler : public DummyPacketHandler { : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} // Packet type that is 
processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::VotePacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotePacket; }; class DummyGetNextVotesBundlePacketHandler : public DummyPacketHandler { @@ -167,7 +181,7 @@ class DummyGetNextVotesBundlePacketHandler : public DummyPacketHandler { : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::GetNextVotesSyncPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetNextVotesSyncPacket; }; class DummyVotesBundlePacketHandler : public DummyPacketHandler { @@ -177,7 +191,7 @@ class DummyVotesBundlePacketHandler : public DummyPacketHandler { : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::VotesBundlePacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotesBundlePacket; }; class DummyGetDagSyncPacketHandler : public DummyPacketHandler { @@ -187,7 +201,7 @@ class DummyGetDagSyncPacketHandler : public DummyPacketHandler { : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::GetDagSyncPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetDagSyncPacket; }; class DummyGetPbftSyncPacketHandler : public DummyPacketHandler { @@ -197,7 +211,7 @@ class DummyGetPbftSyncPacketHandler : public DummyPacketHandler { : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = 
SubprotocolPacketType::GetPbftSyncPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetPbftSyncPacket; }; class DummyDagSyncPacketHandler : public DummyPacketHandler { @@ -207,7 +221,7 @@ class DummyDagSyncPacketHandler : public DummyPacketHandler { : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::DagSyncPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagSyncPacket; }; class DummyPbftSyncPacketHandler : public DummyPacketHandler { @@ -217,7 +231,7 @@ class DummyPbftSyncPacketHandler : public DummyPacketHandler { : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::PbftSyncPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPbftSyncPacket; }; HandlersInitData createHandlersInitData() { @@ -335,49 +349,51 @@ TEST_F(TarcapTpTest, block_free_packets) { tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); // Pushes packets to the tp - auto packet = createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket, {}); + auto packet = createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {}); if (packet.second.rlp_.isList()) { std::cout << "is list"; } else { std::cout << "not list"; } const auto packet0_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); const auto packet1_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), 
SubprotocolPacketType::kTransactionPacket, {})).value(); const auto packet2_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); const auto packet3_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); const auto packet4_dag_block_id = - tp.push(createPacket(dev::p2p::NodeID(sender2), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(0, 1)})) + tp.push( + createPacket(dev::p2p::NodeID(sender2), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, 1)})) .value(); const auto packet5_dag_block_id = - tp.push(createPacket(dev::p2p::NodeID(sender2), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(0, 2)})) + tp.push( + createPacket(dev::p2p::NodeID(sender2), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, 2)})) .value(); const auto packet8_status_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::StatusPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); const auto packet9_status_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::StatusPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); const auto packet12_vote_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::VotePacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); const auto packet13_vote_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::VotePacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); const 
auto packet14_get_pbft_next_votes_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::GetNextVotesSyncPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetNextVotesSyncPacket, {})).value(); const auto packet15_get_pbft_next_votes_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::GetNextVotesSyncPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetNextVotesSyncPacket, {})).value(); const auto packet16_pbft_next_votes_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::VotesBundlePacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotesBundlePacket, {})).value(); size_t packets_count = 0; const auto packet17_pbft_next_votes_id = packets_count = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::VotesBundlePacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotesBundlePacket, {})).value(); tp.startProcessing(); @@ -480,25 +496,25 @@ TEST_F(TarcapTpTest, hard_blocking_deps) { // Pushes packets to the tp const auto packet0_dag_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagSyncPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket, {})).value(); const auto packet1_dag_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagSyncPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket, {})).value(); const auto packet2_get_dag_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::GetDagSyncPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); const auto packet3_get_dag_sync_id = - tp.push(createPacket(init_data.copySender(), 
SubprotocolPacketType::GetDagSyncPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); const auto packet4_get_pbft_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::GetPbftSyncPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetPbftSyncPacket, {})).value(); const auto packet5_get_pbft_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::GetPbftSyncPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetPbftSyncPacket, {})).value(); const auto packet6_pbft_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::PbftSyncPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kPbftSyncPacket, {})).value(); const auto packet7_pbft_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::PbftSyncPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kPbftSyncPacket, {})).value(); size_t packets_count = 0; const auto packet8_get_dag_sync_id = packets_count = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::GetDagSyncPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); tp.startProcessing(); @@ -602,17 +618,17 @@ TEST_F(TarcapTpTest, peer_order_blocking_deps) { // Pushes packets to the tp const auto packet0_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket)).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); const auto packet1_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket)).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); const auto 
packet2_dag_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagSyncPacket)).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket)).value(); const auto packet3_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket)).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); size_t packets_count = 0; const auto packet4_dag_block_id = packets_count = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(1)})) + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1)})) .value(); // How should packets be processed: @@ -690,17 +706,17 @@ TEST_F(TarcapTpTest, same_dag_blks_ordering) { // Pushes packets to the tp const auto blk0_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {dag_block})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); const auto blk1_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {dag_block})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); const auto blk2_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {dag_block})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); const auto blk3_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {dag_block})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); size_t packets_count = 0; const auto blk4_id = packets_count = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, 
{dag_block})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); tp.startProcessing(); @@ -753,24 +769,24 @@ TEST_F(TarcapTpTest, dag_blks_lvls_ordering) { // Pushes packets to the tp const auto blk0_lvl1_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(1, 1)})) + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, 1)})) .value(); const auto blk1_lvl1_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(1, 2)})) + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, 2)})) .value(); const auto blk2_lvl0_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(0, 3)})) + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, 3)})) .value(); const auto blk3_lvl1_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(1, 4)})) + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, 4)})) .value(); const auto blk4_lvl2_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(2, 5)})) + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(2, 5)})) .value(); size_t packets_count = 0; const auto blk5_lvl3_id = packets_count = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(3, 6)})) + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(3, 6)})) .value(); tp.startProcessing(); @@ -856,7 +872,7 @@ TEST_F(TarcapTpTest, threads_borrowing) { // Pushes packets to the tp 
std::vector pushed_packets_ids; for (size_t i = 0; i < threads_num; i++) { - uint64_t packet_id = tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::VotePacket, {})).value(); + uint64_t packet_id = tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); pushed_packets_ids.push_back(packet_id); } @@ -948,13 +964,13 @@ TEST_F(TarcapTpTest, low_priotity_queue_starvation) { // packets from each queue concurrently -> many packets will be waiting due to max threads num reached for specific // priority queues for (size_t i = 0; i < 2 * 10 * threads_num; i++) { - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::VotePacket, {})).value(); - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); } // Push a few packets low priority packets for (size_t i = 0; i < 4; i++) { - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::StatusPacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); } tp.startProcessing(); diff --git a/tests/test_util/gtest.hpp b/tests/test_util/gtest.hpp index ef4f01e94f..e8c46555f5 100644 --- a/tests/test_util/gtest.hpp +++ b/tests/test_util/gtest.hpp @@ -4,8 +4,8 @@ #include +#include "common/init.hpp" #include "common/lazy.hpp" -#include "common/static_init.hpp" #include "config/config.hpp" namespace fs = std::filesystem; @@ -46,7 +46,7 @@ struct WithDataDir : virtual BaseTest { std::filesystem::remove_all(data_dir); std::filesystem::create_directories(data_dir); } - virtual ~WithDataDir() { std::filesystem::remove_all(data_dir); } + virtual ~WithDataDir() = default; // { std::filesystem::remove_all(data_dir); } WithDataDir(const WithDataDir &) = delete; 
WithDataDir(WithDataDir &&) = delete; diff --git a/tests/test_util/include/test_util/node_dag_creation_fixture.hpp b/tests/test_util/include/test_util/node_dag_creation_fixture.hpp index d8b4d34b7d..938dc124b5 100644 --- a/tests/test_util/include/test_util/node_dag_creation_fixture.hpp +++ b/tests/test_util/include/test_util/node_dag_creation_fixture.hpp @@ -18,7 +18,7 @@ struct NodeDagCreationFixture : NodesTest { NodeDagCreationFixture() : NodesTest() {} ~NodeDagCreationFixture() = default; struct DagBlockWithTxs { - DagBlock blk; + std::shared_ptr blk; SharedTransactions trxs; }; void modifyConfig(FullNodeConfig &cfg); diff --git a/tests/test_util/include/test_util/samples.hpp b/tests/test_util/include/test_util/samples.hpp index 132370024c..ac0e436b59 100644 --- a/tests/test_util/include/test_util/samples.hpp +++ b/tests/test_util/include/test_util/samples.hpp @@ -99,16 +99,13 @@ class TxGenerator { inline auto const TX_GEN = Lazy([] { return TxGenerator(); }); -bool sendTrx(uint64_t count, unsigned port); +bool sendTrx(uint64_t count, unsigned port, dev::Secret secret); SharedTransactions createSignedTrxSamples(unsigned start, unsigned num, secret_t const &sk, bytes data = dev::fromHex("00FEDCBA9876543210000000")); -std::vector createMockDagBlkSamples(unsigned pivot_start, unsigned blk_num, unsigned trx_start, - unsigned trx_len, unsigned trx_overlap); +std::vector> createMockDag0(const blk_hash_t &genesis); -std::vector createMockDag0(const blk_hash_t &genesis); - -std::vector createMockDag1(const blk_hash_t &genesis); +std::vector> createMockDag1(const blk_hash_t &genesis); } // namespace taraxa::core_tests::samples diff --git a/tests/test_util/include/test_util/test_util.hpp b/tests/test_util/include/test_util/test_util.hpp index 7bb1e82c67..a537279b47 100644 --- a/tests/test_util/include/test_util/test_util.hpp +++ b/tests/test_util/include/test_util/test_util.hpp @@ -3,22 +3,16 @@ #include #include -#include #include #include #include #include #include 
-#include #include -#include #include #include "../../gtest.hpp" -#include "common/encoding_solidity.hpp" -#include "common/vrf_wrapper.hpp" #include "config/config.hpp" -#include "network/network.hpp" #include "node/node.hpp" #include "transaction/transaction_manager.hpp" @@ -157,6 +151,8 @@ struct TransactionClient { Context process(const std::shared_ptr& trx, bool wait_executed = true) const; Context coinTransfer(const addr_t& to, const val_t& val, bool wait_executed = true); + + addr_t getAddress() const { return dev::KeyPair(secret_).address(); } }; SharedTransaction make_dpos_trx(const FullNodeConfig& sender_node_cfg, const u256& value = 0, uint64_t nonce = 0, @@ -165,6 +161,12 @@ SharedTransaction make_dpos_trx(const FullNodeConfig& sender_node_cfg, const u25 SharedTransaction make_delegate_tx(const FullNodeConfig& sender_node_cfg, const u256& value, uint64_t nonce, const u256& gas_price); +SharedTransaction make_undelegate_tx(const FullNodeConfig& sender_node_cfg, const u256& value, uint64_t nonce, + const u256& gas_price); + +SharedTransaction make_redelegate_tx(const FullNodeConfig& sender_node_cfg, const u256& value, const Address& to, + uint64_t nonce, const u256& gas_price); + u256 own_balance(const std::shared_ptr& node); state_api::BalanceMap effective_initial_balances(const state_api::Config& cfg); @@ -190,8 +192,8 @@ std::shared_ptr genDummyVote(PbftVoteTypes type, PbftPeriod period, Pb std::pair clearAllVotes(const std::vector>& nodes); struct NodesTest : virtual WithDataDir { - virtual ~NodesTest() {} NodesTest(); + virtual ~NodesTest() { CleanupDirs(); } NodesTest(const NodesTest&) = delete; NodesTest(NodesTest&&) = delete; NodesTest& operator=(const NodesTest&) = delete; @@ -201,8 +203,6 @@ struct NodesTest : virtual WithDataDir { void CleanupDirs(); - void TearDown() override; - std::vector make_node_cfgs(size_t total_count, size_t validators_count = 1, uint tests_speed = 1, bool enable_rpc_http = false, bool enable_rpc_ws = false); diff --git 
a/tests/test_util/src/node_dag_creation_fixture.cpp b/tests/test_util/src/node_dag_creation_fixture.cpp index 729ef730db..45f6a48ad6 100644 --- a/tests/test_util/src/node_dag_creation_fixture.cpp +++ b/tests/test_util/src/node_dag_creation_fixture.cpp @@ -46,14 +46,14 @@ void NodeDagCreationFixture::deployContract() { WAIT_EXPECT_TRUE(ctx, node->getDB()->transactionFinalized(trx->getHash())); if (!contract_addr) { - auto receipt = node->getFinalChain()->transaction_receipt(trx->getHash()); + auto receipt = node->getFinalChain()->transactionReceipt(trx->getHash()); WAIT_EXPECT_TRUE(ctx, receipt.has_value()); WAIT_EXPECT_TRUE(ctx, receipt->new_contract_address.has_value()); contract_addr = receipt->new_contract_address; } - auto r = node->getFinalChain()->transaction_receipt(trx->getHash()); + auto r = node->getFinalChain()->transactionReceipt(trx->getHash()); - WAIT_EXPECT_TRUE(ctx, !node->getFinalChain()->get_code(contract_addr.value()).empty()); + WAIT_EXPECT_TRUE(ctx, !node->getFinalChain()->getCode(contract_addr.value()).empty()); }); ASSERT_TRUE(contract_addr.has_value()); std::cout << "Contract deployed: " << contract_addr.value() << std::endl; @@ -139,8 +139,9 @@ std::vector NodeDagCreationFixture::gen std::vector trx_hashes; std::transform(trx_itr, trx_itr_next, std::back_inserter(trx_hashes), [](std::shared_ptr trx) { return trx->getHash(); }); - DagBlock blk(pivot, level, tips, trx_hashes, trx_per_block * trx_estimation, vdf, node->getSecretKey()); - this_level_blocks.push_back(blk.getHash()); + auto blk = std::make_shared(pivot, level, tips, trx_hashes, trx_per_block * trx_estimation, vdf, + node->getSecretKey()); + this_level_blocks.push_back(blk->getHash()); result.emplace_back(DagBlockWithTxs{blk, SharedTransactions(trx_itr, trx_itr_next)}); trx_itr = trx_itr_next; } @@ -155,11 +156,11 @@ std::vector NodeDagCreationFixture::gen vdf_sortition::VdfSortition vdf(vdf_config, node->getVrfSecretKey(), vrf_wrapper::VrfSortitionBase::makeVrfInput(level, 
period_block_hash), 1, 1); vdf.computeVdfSolution(vdf_config, dag_genesis.asBytes(), false); - DagBlock blk(pivot, level, tips, {transactions.rbegin()->get()->getHash()}, trx_per_block * trx_estimation, vdf, - node->getSecretKey()); + auto blk = std::make_shared(pivot, level, tips, vec_trx_t{transactions.rbegin()->get()->getHash()}, + trx_per_block * trx_estimation, vdf, node->getSecretKey()); result.emplace_back(DagBlockWithTxs{blk, SharedTransactions(transactions.rbegin(), transactions.rbegin() + 1)}); - pivot = blk.getHash(); - tips = {blk.getHash()}; + pivot = blk->getHash(); + tips = {blk->getHash()}; trx_itr_next++; EXPECT_EQ(trx_itr_next, transactions.end()); diff --git a/tests/test_util/src/samples.cpp b/tests/test_util/src/samples.cpp index 151beec493..b25fff901c 100644 --- a/tests/test_util/src/samples.cpp +++ b/tests/test_util/src/samples.cpp @@ -1,7 +1,7 @@ #include "test_util/samples.hpp" namespace taraxa::core_tests::samples { -bool sendTrx(uint64_t count, unsigned port) { +bool sendTrx(uint64_t count, unsigned port, dev::Secret secret) { auto pattern = R"( curl --silent -m 10 --output /dev/null -d \ '{ @@ -21,9 +21,8 @@ bool sendTrx(uint64_t count, unsigned port) { }' 0.0.0.0:%s )"; for (uint64_t i = 0; i < count; ++i) { - auto retcode = system(fmt(pattern, i + 1, val_t(TEST_TX_GAS_LIMIT), val_t(0), addr_t::random(), - samples::TX_GEN->getRandomUniqueSenderSecret().makeInsecure(), port) - .c_str()); + auto retcode = system( + fmt(pattern, i + 1, val_t(TEST_TX_GAS_LIMIT), val_t(0), addr_t::random(), secret.makeInsecure(), port).c_str()); if (retcode != 0) { return false; } @@ -40,113 +39,84 @@ SharedTransactions createSignedTrxSamples(unsigned start, unsigned num, secret_t return trxs; } -std::vector createMockDagBlkSamples(unsigned pivot_start, unsigned blk_num, unsigned trx_start, - unsigned trx_len, unsigned trx_overlap) { - assert(pivot_start + blk_num < std::numeric_limits::max()); - std::vector blks; - unsigned trx = trx_start; - for (auto i = 
pivot_start; i < blk_num; ++i) { - blk_hash_t pivot(i); - blk_hash_t hash(i + 1); - vec_trx_t trxs; - for (unsigned i = 0; i < trx_len; ++i, trx++) { - trxs.emplace_back(trx_hash_t(trx)); - } - for (unsigned i = 0; i < trx_overlap; ++i) { - trx--; - } - - DagBlock blk(blk_hash_t(pivot), // pivot - level_t(0), // level - {blk_hash_t(2), blk_hash_t(3), blk_hash_t(4)}, // tips - trxs, // trxs - sig_t(7777), // sig - blk_hash_t(hash), // hash - addr_t(12345)); // sender - - blks.emplace_back(blk); - } - return blks; -} - -std::vector createMockDag0(const blk_hash_t& genesis) { - std::vector blks; - DagBlock blk1(genesis, // pivot - 1, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk2(genesis, // pivot - 1, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk3(genesis, // pivot - 1, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk4(blk1.getHash(), // pivot - 2, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk5(blk1.getHash(), // pivot - 2, // level - {blk2.getHash()}, // tips - {}, secret_t::random()); - DagBlock blk6(blk3.getHash(), // pivot - 2, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk7(blk5.getHash(), // pivot - 3, // level - {blk6.getHash()}, // tips - {}, secret_t::random()); - DagBlock blk8(blk5.getHash(), // pivot - 3, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk9(blk6.getHash(), // pivot - 3, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk10(blk7.getHash(), // pivot - 4, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk11(blk7.getHash(), // pivot - 4, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk12(blk9.getHash(), // pivot - 4, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk13(blk10.getHash(), // pivot - 5, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk14(blk11.getHash(), // pivot - 5, // level - {blk12.getHash()}, // tips - {}, secret_t::random()); - DagBlock 
blk15(blk13.getHash(), // pivot - 6, // level - {blk14.getHash()}, // tips - {}, secret_t::random()); - DagBlock blk16(blk13.getHash(), // pivot - 6, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk17(blk12.getHash(), // pivot - 5, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk18(blk15.getHash(), // pivot - 7, // level - {blk8.getHash(), blk16.getHash(), blk17.getHash()}, // tips - {}, secret_t::random()); - DagBlock blk19(blk18.getHash(), // pivot - 8, // level - {}, // tips - {}, secret_t::random()); +std::vector> createMockDag0(const blk_hash_t& genesis) { + std::vector> blks; + auto blk1 = std::make_shared(genesis, // pivot + 1, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk2 = std::make_shared(genesis, // pivot + 1, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk3 = std::make_shared(genesis, // pivot + 1, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk4 = std::make_shared(blk1->getHash(), // pivot + 2, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk5 = std::make_shared(blk1->getHash(), // pivot + 2, // level + vec_blk_t{blk2->getHash()}, // tips + vec_trx_t{}, secret_t::random()); + auto blk6 = std::make_shared(blk3->getHash(), // pivot + 2, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk7 = std::make_shared(blk5->getHash(), // pivot + 3, // level + vec_blk_t{blk6->getHash()}, // tips + vec_trx_t{}, secret_t::random()); + auto blk8 = std::make_shared(blk5->getHash(), // pivot + 3, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk9 = std::make_shared(blk6->getHash(), // pivot + 3, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk10 = std::make_shared(blk7->getHash(), // pivot + 4, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk11 = std::make_shared(blk7->getHash(), // pivot + 4, // level 
+ vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk12 = std::make_shared(blk9->getHash(), // pivot + 4, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk13 = std::make_shared(blk10->getHash(), // pivot + 5, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk14 = std::make_shared(blk11->getHash(), // pivot + 5, // level + vec_blk_t{blk12->getHash()}, // tips + vec_trx_t{}, secret_t::random()); + auto blk15 = std::make_shared(blk13->getHash(), // pivot + 6, // level + vec_blk_t{blk14->getHash()}, // tips + vec_trx_t{}, secret_t::random()); + auto blk16 = std::make_shared(blk13->getHash(), // pivot + 6, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk17 = std::make_shared(blk12->getHash(), // pivot + 5, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk18 = std::make_shared(blk15->getHash(), // pivot + 7, // level + vec_blk_t{blk8->getHash(), blk16->getHash(), blk17->getHash()}, // tips + vec_trx_t{}, secret_t::random()); + auto blk19 = std::make_shared(blk18->getHash(), // pivot + 8, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); blks.emplace_back(blk1); blks.emplace_back(blk2); blks.emplace_back(blk3); @@ -170,143 +140,143 @@ std::vector createMockDag0(const blk_hash_t& genesis) { return blks; } -std::vector createMockDag1(const blk_hash_t& genesis) { - std::vector blks; - DagBlock dummy; - DagBlock blk1(genesis, // pivot - 1, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(1), // hash - addr_t(123)); +std::vector> createMockDag1(const blk_hash_t& genesis) { + std::vector> blks; + std::shared_ptr dummy; + auto blk1 = std::make_shared(genesis, // pivot + 1, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(1), // hash + addr_t(123)); - DagBlock blk2(genesis, // pivot - 1, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(2), // hash - 
addr_t(123)); - DagBlock blk3(genesis, // pivot - 1, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(3), // hash - addr_t(123)); - DagBlock blk4(blk_hash_t(1), // pivot - 2, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(4), // hash - addr_t(123)); - DagBlock blk5(blk_hash_t(1), // pivot - 2, // level - {blk_hash_t(2)}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(5), // hash - addr_t(123)); - DagBlock blk6(blk_hash_t(3), // pivot - 2, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(6), // hash - addr_t(123)); - DagBlock blk7(blk_hash_t(5), // pivot - 3, // level - {blk_hash_t(6)}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(7), // hash - addr_t(123)); - DagBlock blk8(blk_hash_t(5), // pivot - 3, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(8), // hash - addr_t(123)); - DagBlock blk9(blk_hash_t(6), // pivot - 3, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(9), // hash - addr_t(123)); - DagBlock blk10(blk_hash_t(7), // pivot - 4, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(10), // hash - addr_t(123)); - DagBlock blk11(blk_hash_t(7), // pivot - 4, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(11), // hash - addr_t(123)); - DagBlock blk12(blk_hash_t(9), // pivot - 4, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(12), // hash - addr_t(123)); - DagBlock blk13(blk_hash_t(10), // pivot - 5, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(13), // hash - addr_t(123)); - DagBlock blk14(blk_hash_t(11), // pivot - 5, // level - {blk_hash_t(12)}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(14), // hash - addr_t(123)); - DagBlock blk15(blk_hash_t(13), // pivot - 6, // level - {blk_hash_t(14)}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(15), // hash - addr_t(123)); - DagBlock blk16(blk_hash_t(13), // pivot - 6, // level - {}, // tips - {}, 
// trxs - sig_t(0), // sig - blk_hash_t(16), // hash - addr_t(123)); - DagBlock blk17(blk_hash_t(12), // pivot - 5, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(17), // hash - addr_t(123)); - DagBlock blk18(blk_hash_t(15), // pivot - 7, // level - {blk_hash_t(8), blk_hash_t(16), blk_hash_t(17)}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(18), // hash - addr_t(123)); - DagBlock blk19(blk_hash_t(18), // pivot - 8, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(19), // hash - addr_t(123)); + auto blk2 = std::make_shared(genesis, // pivot + 1, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(2), // hash + addr_t(123)); + auto blk3 = std::make_shared(genesis, // pivot + 1, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(3), // hash + addr_t(123)); + auto blk4 = std::make_shared(blk_hash_t(1), // pivot + 2, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(4), // hash + addr_t(123)); + auto blk5 = std::make_shared(blk_hash_t(1), // pivot + 2, // level + vec_blk_t{blk_hash_t(2)}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(5), // hash + addr_t(123)); + auto blk6 = std::make_shared(blk_hash_t(3), // pivot + 2, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(6), // hash + addr_t(123)); + auto blk7 = std::make_shared(blk_hash_t(5), // pivot + 3, // level + vec_blk_t{blk_hash_t(6)}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(7), // hash + addr_t(123)); + auto blk8 = std::make_shared(blk_hash_t(5), // pivot + 3, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(8), // hash + addr_t(123)); + auto blk9 = std::make_shared(blk_hash_t(6), // pivot + 3, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(9), // hash + addr_t(123)); + auto blk10 = 
std::make_shared(blk_hash_t(7), // pivot + 4, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(10), // hash + addr_t(123)); + auto blk11 = std::make_shared(blk_hash_t(7), // pivot + 4, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(11), // hash + addr_t(123)); + auto blk12 = std::make_shared(blk_hash_t(9), // pivot + 4, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(12), // hash + addr_t(123)); + auto blk13 = std::make_shared(blk_hash_t(10), // pivot + 5, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(13), // hash + addr_t(123)); + auto blk14 = std::make_shared(blk_hash_t(11), // pivot + 5, // level + vec_blk_t{blk_hash_t(12)}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(14), // hash + addr_t(123)); + auto blk15 = std::make_shared(blk_hash_t(13), // pivot + 6, // level + vec_blk_t{blk_hash_t(14)}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(15), // hash + addr_t(123)); + auto blk16 = std::make_shared(blk_hash_t(13), // pivot + 6, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(16), // hash + addr_t(123)); + auto blk17 = std::make_shared(blk_hash_t(12), // pivot + 5, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(17), // hash + addr_t(123)); + auto blk18 = std::make_shared(blk_hash_t(15), // pivot + 7, // level + vec_blk_t{blk_hash_t(8), blk_hash_t(16), blk_hash_t(17)}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(18), // hash + addr_t(123)); + auto blk19 = std::make_shared(blk_hash_t(18), // pivot + 8, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(19), // hash + addr_t(123)); blks.emplace_back(dummy); blks.emplace_back(blk1); blks.emplace_back(blk2); diff --git a/tests/test_util/src/test_util.cpp b/tests/test_util/src/test_util.cpp index 
c0eda415de..4a225a8b7b 100644 --- a/tests/test_util/src/test_util.cpp +++ b/tests/test_util/src/test_util.cpp @@ -1,5 +1,8 @@ #include "test_util/test_util.hpp" +#include + +#include "common/encoding_solidity.hpp" #include "pbft/pbft_manager.hpp" #include "vote_manager/vote_manager.hpp" @@ -53,8 +56,8 @@ TransactionClient::Context TransactionClient::process(const std::shared_ptrgetHash(); if (wait_executed) { - auto success = wait(wait_opts_, - [&, this](auto& ctx) { ctx.fail_if(!node_->getFinalChain()->transaction_location(trx_hash)); }); + auto success = + wait(wait_opts_, [&, this](auto& ctx) { ctx.fail_if(!node_->getFinalChain()->transactionLocation(trx_hash)); }); if (success) { ctx.stage = TransactionStage::executed; } @@ -92,6 +95,22 @@ SharedTransaction make_delegate_tx(const FullNodeConfig& sender_node_cfg, const sender_node_cfg.node_secret, kContractAddress, sender_node_cfg.genesis.chain_id); } +SharedTransaction make_undelegate_tx(const FullNodeConfig& sender_node_cfg, const u256& value, uint64_t nonce, + const u256& gas_price) { + const auto addr = dev::toAddress(sender_node_cfg.node_secret); + const auto input = util::EncodingSolidity::packFunctionCall("undelegate(address,uint256)", addr, value); + return std::make_shared(nonce, 0, gas_price, TEST_TX_GAS_LIMIT, std::move(input), + sender_node_cfg.node_secret, kContractAddress, sender_node_cfg.genesis.chain_id); +} + +SharedTransaction make_redelegate_tx(const FullNodeConfig& sender_node_cfg, const u256& value, const Address& to, + uint64_t nonce, const u256& gas_price) { + const auto addr = dev::toAddress(sender_node_cfg.node_secret); + const auto input = util::EncodingSolidity::packFunctionCall("reDelegate(address,address,uint256)", addr, to, value); + return std::make_shared(nonce, 0, gas_price, TEST_TX_GAS_LIMIT, std::move(input), + sender_node_cfg.node_secret, kContractAddress, sender_node_cfg.genesis.chain_id); +} + u256 own_balance(const std::shared_ptr& node) { return 
node->getFinalChain()->getBalance(node->getAddress()).first; } @@ -254,8 +273,6 @@ void NodesTest::CleanupDirs() { } } -void NodesTest::TearDown() { CleanupDirs(); } - std::vector NodesTest::make_node_cfgs(size_t total_count, size_t validators_count, uint tests_speed, bool enable_rpc_http, bool enable_rpc_ws) { diff --git a/tests/transaction_test.cpp b/tests/transaction_test.cpp index 897ba5a63f..66a4312e13 100644 --- a/tests/transaction_test.cpp +++ b/tests/transaction_test.cpp @@ -6,9 +6,9 @@ #include #include -#include "common/static_init.hpp" +#include "common/init.hpp" #include "config/genesis.hpp" -#include "final_chain/final_chain_impl.hpp" +#include "final_chain/final_chain.hpp" #include "final_chain/trie_common.hpp" #include "logger/logger.hpp" #include "pbft/pbft_manager.hpp" @@ -20,16 +20,12 @@ namespace taraxa::core_tests { const unsigned NUM_TRX = 40; -const unsigned NUM_BLK = 4; -const unsigned BLK_TRX_LEN = 4; -const unsigned BLK_TRX_OVERLAP = 1; auto g_secret = Lazy([] { return dev::Secret("3800b2875669d9b2053c1aff9224ecfdc411423aac5b5a73d7a45ced1c3b9dcd", dev::Secret::ConstructFromStringType::FromHex); }); auto g_key_pair = Lazy([] { return dev::KeyPair(g_secret); }); auto g_signed_trx_samples = Lazy([] { return samples::createSignedTrxSamples(1, NUM_TRX, g_secret); }); -auto g_blk_samples = Lazy([] { return samples::createMockDagBlkSamples(0, NUM_BLK, 0, BLK_TRX_LEN, BLK_TRX_OVERLAP); }); struct TransactionTest : NodesTest {}; @@ -122,7 +118,7 @@ TEST_F(TransactionTest, sig) { TEST_F(TransactionTest, verifiers) { auto db = std::make_shared(data_dir); auto cfg = node_cfgs.front(); - auto final_chain = std::make_shared(db, cfg, addr_t{}); + auto final_chain = std::make_shared(db, cfg, addr_t{}); TransactionManager trx_mgr(cfg, db, final_chain, addr_t()); // insert trx std::thread t([&trx_mgr]() { @@ -143,7 +139,7 @@ TEST_F(TransactionTest, verifiers) { TEST_F(TransactionTest, transaction_limit) { auto db = std::make_shared(data_dir); auto cfg = 
node_cfgs.front(); - TransactionManager trx_mgr(cfg, db, std::make_shared(db, cfg, addr_t{}), addr_t()); + TransactionManager trx_mgr(cfg, db, std::make_shared(db, cfg, addr_t{}), addr_t()); // insert trx std::thread t([&trx_mgr]() { for (auto const& t : *g_signed_trx_samples) { @@ -164,7 +160,7 @@ TEST_F(TransactionTest, transaction_limit) { TEST_F(TransactionTest, prepare_signed_trx_for_propose) { auto db = std::make_shared(data_dir); auto cfg = node_cfgs.front(); - TransactionManager trx_mgr(cfg, db, std::make_shared(db, cfg, addr_t{}), addr_t()); + TransactionManager trx_mgr(cfg, db, std::make_shared(db, cfg, addr_t{}), addr_t()); std::thread insertTrx([&trx_mgr]() { for (auto const& t : *g_signed_trx_samples) { trx_mgr.insertTransaction(t); @@ -194,7 +190,7 @@ TEST_F(TransactionTest, prepare_signed_trx_for_propose) { TEST_F(TransactionTest, transaction_low_nonce) { auto db = std::make_shared(data_dir); auto cfg = node_cfgs.front(); - auto final_chain = std::make_shared(db, cfg, addr_t{}); + auto final_chain = std::make_shared(db, cfg, addr_t{}); TransactionManager trx_mgr(cfg, db, final_chain, addr_t()); const auto& trx_2 = g_signed_trx_samples[1]; auto& trx_1 = g_signed_trx_samples[0]; @@ -203,7 +199,7 @@ TEST_F(TransactionTest, transaction_low_nonce) { EXPECT_TRUE(trx_mgr.insertTransaction(trx_1).first); EXPECT_TRUE(trx_mgr.insertTransaction(trx_2).first); std::vector trx_hashes{trx_1->getHash(), trx_2->getHash()}; - DagBlock dag_blk({}, {}, {}, trx_hashes, secret_t::random()); + auto dag_blk = std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, trx_hashes, secret_t::random()); db->saveDagBlock(dag_blk); std::vector reward_votes_hashes; auto pbft_block = @@ -216,7 +212,7 @@ TEST_F(TransactionTest, transaction_low_nonce) { auto batch = db->createWriteBatch(); db->savePeriodData(period_data, batch); db->commitWriteBatch(batch); - final_chain->finalize(std::move(period_data), {dag_blk.getHash()}).get(); + final_chain->finalize(std::move(period_data), 
{dag_blk->getHash()}).get(); // Verify low nonce transaction is detected in verification auto low_nonce_trx = std::make_shared(1, 101, 0, 100000, dev::bytes(), g_secret, addr_t::random()); @@ -234,7 +230,7 @@ TEST_F(TransactionTest, transaction_low_nonce) { // Verify insufficient balance transaction is detected in verification auto trx_insufficient_balance = - std::make_shared(3, final_chain->get_account(dev::toAddress(g_secret))->balance + 1, 0, 100000, + std::make_shared(3, final_chain->getAccount(dev::toAddress(g_secret))->balance + 1, 0, 100000, dev::bytes(), g_secret, addr_t::random()); result = trx_mgr.verifyTransaction(trx_insufficient_balance); EXPECT_EQ(result.first, true); @@ -266,7 +262,7 @@ TEST_F(TransactionTest, transaction_low_nonce) { TEST_F(TransactionTest, transaction_concurrency) { auto db = std::make_shared(data_dir); auto cfg = node_cfgs.front(); - TransactionManager trx_mgr(cfg, db, std::make_shared(db, cfg, addr_t{}), addr_t()); + TransactionManager trx_mgr(cfg, db, std::make_shared(db, cfg, addr_t{}), addr_t()); bool stopped = false; // Insert transactions to memory pool and keep trying to insert them again on separate thread, it should always fail std::thread insertTrx([&trx_mgr, &stopped]() { @@ -650,7 +646,7 @@ TEST_F(TransactionTest, typed_deserialization) { TEST_F(TransactionTest, zero_gas_price_limit) { auto db = std::make_shared(data_dir); auto cfg = node_cfgs.front(); - auto final_chain = std::make_shared(db, cfg, addr_t{}); + auto final_chain = std::make_shared(db, cfg, addr_t{}); TransactionManager trx_mgr(cfg, db, final_chain, addr_t()); auto make_trx_with_price = [](uint64_t price) { return std::make_shared(1, 100, price, 100000, dev::bytes(), g_secret, addr_t::random()); @@ -673,7 +669,7 @@ TEST_F(TransactionTest, gas_price_limiting) { auto db = std::make_shared(data_dir); auto cfg = node_cfgs.front(); auto minimum_price = cfg.genesis.gas_price.minimum_price = 10; - auto final_chain = std::make_shared(db, cfg, addr_t{}); + auto 
final_chain = std::make_shared(db, cfg, addr_t{}); TransactionManager trx_mgr(cfg, db, final_chain, addr_t()); auto make_trx_with_price = [](uint64_t price) { return std::make_shared(1, 100, price, 100000, dev::bytes(), g_secret, addr_t::random()); diff --git a/tests/vote_test.cpp b/tests/vote_test.cpp index fa687a9e44..5356a701d3 100644 --- a/tests/vote_test.cpp +++ b/tests/vote_test.cpp @@ -1,7 +1,7 @@ #include #include -#include "common/static_init.hpp" +#include "common/init.hpp" #include "logger/logger.hpp" #include "network/network.hpp" #include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp"