diff --git a/.circleci/config.yml b/.circleci/config.yml index dc07605fc5..1d1faae4e2 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -26,7 +26,6 @@ orbs: github-release: izumin5210/github-release@0.1.1 gcp-gcr: circleci/gcp-gcr@0.13.0 gcp-cli: circleci/gcp-cli@2.4.1 - docker-cache: cci-x/docker-registry-image-cache@0.2.12 helm-release: taraxa/helm-release@0.1.1 commands: @@ -268,7 +267,11 @@ commands: --set consensusnode.persistence.enabled=true \ --set node.serviceMonitor.enabled=true \ --set bootnode.serviceMonitor.enabled=false \ - --set consensusnode.serviceMonitor.enabled=true + --set consensusnode.serviceMonitor.enabled=true \ + --set node.nodeSelector."cloud\\.google\\.com/gke-nodepool"=blockchain-prnet \ + --set bootnode.nodeSelector."cloud\\.google\\.com/gke-nodepool"=blockchain-prnet \ + --set consensusnode.nodeSelector."cloud\\.google\\.com/gke-nodepool"=blockchain-prnet \ + --set node.indexer.enabled=true fi cleanup_prnet_chart: @@ -324,39 +327,57 @@ commands: echo "export HELM_TEST_NAME=pr-${PR}" >> $BASH_ENV echo "export DOCKER_BRANCH_TAG=$(./scripts/docker_tag_from_branch.sh $CIRCLE_BRANCH)" >> $BASH_ENV echo "export GCP_IMAGE=gcr.io/${GOOGLE_PROJECT_ID}/${IMAGE}" >> $BASH_ENV - - run: - name: Clean old Images if disk available is lower than 15G - command: | - for time in 120 96 72 48 24 12 6 3 1 0 - do - if [ $(df /var/lib/docker --block-size=1073741824 --output=avail|grep -v Avail) -lt 15 ]; then - df /var/lib/docker --block-size=1073741824 --output=avail - echo "Pruning images older than ${time}h" - docker image prune -a --force --filter "until=${time}h" - fi - done build: - description: Builds docker images + description: Builds docker image (${IMAGE}) steps: - - run: - name: Build builder image - command: | - docker build -t ${IMAGE}:${VERSION} --target builder . - run: name: Checkout Submodules command: | git submodule sync git submodule update --init --recursive --jobs 8 - run: - name: Build ctest image + name: Compile and build binaries command: | docker build -t ${IMAGE}-ctest:${VERSION} --target build . - run: - name: Build taraxad image + name: Build final Docker image (${IMAGE}) command: | docker build -t ${IMAGE}:${VERSION} . + build_builder: + description: Builds docker image + steps: + - run: + name: Create builder + command: | + docker buildx create --name taraxa-builder --use --bootstrap + + - run: + name: Build builder image (multiarch) + command: | + # docker build -t ${IMAGE}:${VERSION} -f builder.Dockerfile . + docker buildx build \ + --output=type=local,dest=/tmp \ + --platform linux/arm64,linux/amd64 \ + --tag taraxa/${IMAGE}:123 \ + -f builder.Dockerfile . 
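The `build_builder` command above drives a multi-arch build through Docker Buildx, exporting the result to the local filesystem (`--output=type=local`); tagging and pushing are handled by the separate `tag_builder`/`push_dockerhub` steps. A rough local equivalent for reproducing the builder image outside CI — the `--push` flag and the `v0.1.0` tag here are assumptions for illustration, not what the CI job runs:

    # Requires Docker with the buildx plugin; building linux/arm64 on an amd64
    # host additionally needs QEMU binfmt handlers registered.
    docker buildx create --name taraxa-builder --use --bootstrap
    docker buildx build \
      --platform linux/arm64,linux/amd64 \
      --tag taraxa/taraxa-builder:v0.1.0 \
      --push \
      -f builder.Dockerfile .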
+ + tag_builder: + description: Tags docker images (builder) + steps: + - run: + name: Tag images + command: | + + if [[ ${CIRCLE_TAG} != "" ]];then + TAG=$(echo ${CIRCLE_TAG} | sed 's/^builder-//g') + docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:${TAG} + docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:latest + else + docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:${VERSION} + fi + tag: description: Tags docker images steps: @@ -458,8 +479,16 @@ commands: - run: name: Push Images command: | - docker push taraxa/${IMAGE}:${CIRCLE_TAG} - docker push taraxa/${IMAGE}:latest + + if [[ ${CIRCLE_TAG} != "" ]];then + TAG=$(echo ${CIRCLE_TAG} | sed 's/^builder-//g') + docker push taraxa/${IMAGE}:${TAG} + docker push taraxa/${IMAGE}:latest + else + docker push taraxa/${IMAGE}:${VERSION} + fi + + test: description: Run tests @@ -568,6 +597,8 @@ jobs: https://explorer-pr-<>.prnet.taraxa.io \ RPC>.prnet.taraxa.io >\ https://rpc-pr-<>.prnet.taraxa.io \ + Indexer>.prnet.taraxa.io >\ + https://indexer-pr-<>.prnet.taraxa.io \ \

Boot Nodes

\ \ @@ -685,6 +716,34 @@ jobs: - store_artifacts: path: tmp_docker + build-builder-docker-image: + environment: + - IMAGE: taraxa-builder + - CONAN_REVISIONS_ENABLED: 1 + machine: + image: ubuntu-2204:2022.04.2 + docker_layer_caching: true + resource_class: large + steps: + - checkout + - prepare + - run: + name: List images restored from DLC + command: | + docker images + - build_builder + - run: + name: List images to be saved in DLC + command: | + docker images + - run: + name: Show layers of taraxa-builder image + command: | + docker history taraxa-builder:${VERSION} +# - tag_builder +# - push_dockerhub + + release-docker-image: environment: - IMAGE: taraxa-node @@ -989,6 +1048,47 @@ workflows: - build-linux - build-mac + + # ### workflows for builder image ### # + # run this workflow for specific branches 'builder/*' + build-builder-docker-image: + when: + and: + - not: << pipeline.parameters.deploy_prnet >> + - not: << pipeline.parameters.redeploy_prnet >> + - not: << pipeline.parameters.cleanup_prnet >> + jobs: + - build-builder-docker-image: + filters: + branches: + only: + - /^builder\/.*/ + - /^chore\/builder-.*/ + - /^fix\/builder-.*/ + - /^feature\/builder-.*/ + context: + - DOCKERHUB + + # run this workflow for specific tags 'builder/*' + release-builder-docker-image: + when: + and: + - not: << pipeline.parameters.deploy_prnet >> + - not: << pipeline.parameters.redeploy_prnet >> + - not: << pipeline.parameters.cleanup_prnet >> + jobs: + - build-builder-docker-image: + filters: + branches: + ignore: /.*/ + tags: + only: /^builder-v\d+.\d+.\d+/ + context: + - DOCKERHUB + # ### workflows for builder image ### # + + + # ### workflows for taraxa-node image ### # # run this workflow for all branches apart those reserved for chart build-docker-image: when: @@ -1005,6 +1105,10 @@ workflows: - /^chore\/chart-.*/ - /^fix\/chart-.*/ - /^feature\/chart-.*/ + - /^builder\/.*/ + - /^chore\/builder-.*/ + - /^fix\/builder-.*/ + - /^feature\/builder-.*/ context: - TARAXA - GCP @@ -1029,7 +1133,9 @@ workflows: - K8S - GCR - DOCKERHUB + # ### workflows for taraxa-node image ### # + # ### workflows for taraxa-node helm chart ### # # run this workflow for branches specified below build-helm-chart: when: @@ -1055,7 +1161,6 @@ workflows: - /^fix\/chart-.*/ - /^feature\/chart-.*/ - # run this workflow for tags, like chart-vX.Y.Z release-helm-chart: jobs: @@ -1071,4 +1176,5 @@ workflows: branches: ignore: /.*/ tags: - only: /^chart-v\d+.\d+.\d+/ \ No newline at end of file + only: /^chart-v\d+.\d+.\d+/ + # ### workflows for taraxa-node helm chart ### # diff --git a/CMakeLists.txt b/CMakeLists.txt index 3f30a94ab6..5ff575d0dd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -13,7 +13,7 @@ set(TARAXA_NET_VERSION 1) set(TARAXA_DB_MAJOR_VERSION 1) # Minor version should be modified when changes to the database are made in the tables that can be rebuilt from the # basic tables -set(TARAXA_DB_MINOR_VERSION 0) +set(TARAXA_DB_MINOR_VERSION 1) # Defines Taraxa library target. 
project(taraxa-node VERSION ${TARAXA_VERSION}) @@ -48,6 +48,7 @@ endif() set(CMAKE_CXX_FLAGS_RELEASE "-O3") set(CMAKE_CXX_FLAGS_DEBUG "-g") set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-g -O2") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC") # Enable LTO option(TARAXA_ENABLE_LTO "Build taraxad with LTO (ON or OFF)" OFF) @@ -161,8 +162,15 @@ else() include(${CMAKE_BINARY_DIR}/conan.cmake) - conan_cmake_run(CONANFILE conanfile.py BUILD_TYPE ${CMAKE_BUILD_TYPE} BUILD missing - BASIC_SETUP CMAKE_TARGETS KEEP_RPATHS PROFILE ${CONAN_PROFILE}) + conan_cmake_run(CONANFILE conanfile.py + BUILD_TYPE ${CMAKE_BUILD_TYPE} + CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE} + BASIC_SETUP + CMAKE_TARGETS + KEEP_RPATHS + PROFILE ${CONAN_PROFILE} + BUILD missing + ) set(CONAN_EXPORTED true CACHE BOOL "Is conan already run on the project") endif() @@ -203,6 +211,11 @@ include(CMakeModules/git_info.cmake) find_package(GMP) find_package(MPFR) +include(ExternalProject) +# use JSONCPP library from conan for JSONRPCCPP build +set(JSONCPP_INCLUDE_DIR ${CONAN_INCLUDE_DIRS_JSONCPP}) +include(ProjectJSONRPCCPP) + # Add sub-directories cmakes add_subdirectory(submodules) add_subdirectory(libraries) diff --git a/CMakeModules/EthDependencies.cmake b/CMakeModules/EthDependencies.cmake deleted file mode 100644 index 951bba9bb6..0000000000 --- a/CMakeModules/EthDependencies.cmake +++ /dev/null @@ -1,34 +0,0 @@ -# The Windows platform has not historically had any standard packaging system for delivering -# versioned releases of libraries. Homebrew and PPA perform that function for macOS and Ubuntu -# respectively, and there are analogous standards for other Linux distros. In the absense of -# such a standard, we have chosen to make a "fake packaging system" for cpp-ethereum, which is -# implemented in https://github.com/ethereum/cpp-dependencies. -# -# NOTE - In the last couple of years, the NuGet packaging system, first created for delivery -# of .NET packages, has added support for C++ packages, and it may be possible for us to migrate -# our "fake package server" to that real package server. That would certainly be preferable -# to rolling our own, but it also puts us at the mercy of intermediate package maintainers who -# may be inactive. There is not a fantastic range of packages available at the time of writing, -# so we might find that such a move turns us into becoming the package maintainer for our -# dependencies. Not a net win :-) -# -# "Windows - Try to use NuGet C++ packages" -# https://github.com/ethereum/webthree-umbrella/issues/509 -# -# Perhaps a better alternative is to step away from dependencies onto binary releases entirely, -# and switching to build-from-source for some (or all) of our dependencies, especially if they -# are small. That gives us total control, but at the cost of longer build times. That is the -# approach which Pawel has taken for LLVM in https://github.com/ethereum/evmjit. 
-
-if (MSVC)
-	if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.0.0)
-		message(FATAL_ERROR "ERROR - As of the 1.3.0 release, cpp-ethereum only supports Visual Studio 2015 or newer.\nPlease download from https://www.visualstudio.com/en-us/products/visual-studio-community-vs.aspx.")
-	else()
-		get_filename_component(ETH_DEPENDENCY_INSTALL_DIR "${CMAKE_CURRENT_LIST_DIR}/../deps/x64" ABSOLUTE)
-	endif()
-	set (CMAKE_PREFIX_PATH ${ETH_DEPENDENCY_INSTALL_DIR} ${CMAKE_PREFIX_PATH})
-endif()
-
-# custom cmake scripts
-set(ETH_CMAKE_DIR ${CMAKE_CURRENT_LIST_DIR})
-set(ETH_SCRIPTS_DIR ${ETH_CMAKE_DIR}/scripts)
\ No newline at end of file
diff --git a/CMakeModules/EthExecutableHelper.cmake b/CMakeModules/EthExecutableHelper.cmake
deleted file mode 100644
index eaf2223b35..0000000000
--- a/CMakeModules/EthExecutableHelper.cmake
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-# this function requires the following variables to be specified:
-# ETH_VERSION
-# PROJECT_NAME
-# PROJECT_VERSION
-# PROJECT_COPYRIGHT_YEAR
-# PROJECT_VENDOR
-# PROJECT_DOMAIN_SECOND
-# PROJECT_DOMAIN_FIRST
-# SRC_LIST
-# HEADERS
-#
-# params:
-# ICON
-
-macro(eth_copy_dll EXECUTABLE DLL)
-	# dlls must be unsubstitud list variable (without ${}) in format
-	# optimized;path_to_dll.dll;debug;path_to_dlld.dll
-	if(DEFINED MSVC)
-		list(GET ${DLL} 1 DLL_RELEASE)
-		list(GET ${DLL} 3 DLL_DEBUG)
-		add_custom_command(TARGET ${EXECUTABLE}
-			PRE_BUILD
-			COMMAND ${CMAKE_COMMAND} ARGS
-			-DDLL_RELEASE="${DLL_RELEASE}"
-			-DDLL_DEBUG="${DLL_DEBUG}"
-			-DCONF="$<CONFIGURATION>"
-			-DDESTINATION="${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}"
-			-P "${ETH_SCRIPTS_DIR}/copydlls.cmake"
-		)
-	endif()
-endmacro()
-
-macro(eth_copy_dlls EXECUTABLE)
-	foreach(dll ${ARGN})
-		eth_copy_dll(${EXECUTABLE} ${dll})
-	endforeach(dll)
-endmacro()
-
-macro(jsonrpcstub_create EXECUTABLE SPEC SERVERNAME SERVERDIR SERVERFILENAME CLIENTNAME CLIENTDIR CLIENTFILENAME)
-	if (ETH_JSON_RPC_STUB)
-		add_custom_target(${SPEC}stub)
-		add_custom_command(
-			TARGET ${SPEC}stub
-			POST_BUILD
-			DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${SPEC}"
-			WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
-			COMMAND ${CMAKE_COMMAND} -DETH_SPEC_PATH="${CMAKE_CURRENT_SOURCE_DIR}/${SPEC}" -DETH_SOURCE_DIR="${CMAKE_SOURCE_DIR}" -DETH_CMAKE_DIR="${ETH_CMAKE_DIR}"
-			-DETH_CLIENT_DIR="${CLIENTDIR}"
-			-DETH_CLIENT_NAME=${CLIENTNAME}
-			-DETH_CLIENT_FILENAME=${CLIENTFILENAME}
-			-DETH_SERVER_DIR="${SERVERDIR}"
-			-DETH_SERVER_NAME=${SERVERNAME}
-			-DETH_SERVER_FILENAME=${SERVERFILENAME}
-			-DETH_JSON_RPC_STUB="${ETH_JSON_RPC_STUB}"
-			-P "${ETH_SCRIPTS_DIR}/jsonrpcstub.cmake"
-		)
-		add_dependencies(${EXECUTABLE} ${SPEC}stub)
-	endif ()
-endmacro()
\ No newline at end of file
diff --git a/CMakeModules/EthUtils.cmake b/CMakeModules/EthUtils.cmake
deleted file mode 100644
index e41bf2bd32..0000000000
--- a/CMakeModules/EthUtils.cmake
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-# renames the file if it is different from its destination
-include(CMakeParseArguments)
-#
-macro(replace_if_different SOURCE DST)
-	set(extra_macro_args ${ARGN})
-	set(options CREATE)
-	set(one_value_args)
-	set(multi_value_args)
-	cmake_parse_arguments(REPLACE_IF_DIFFERENT "${options}" "${one_value_args}" "${multi_value_args}" "${extra_macro_args}")
-
-	if (REPLACE_IF_DIFFERENT_CREATE AND (NOT (EXISTS "${DST}")))
-		file(WRITE "${DST}" "")
-	endif()
-
-	execute_process(COMMAND ${CMAKE_COMMAND} -E compare_files "${SOURCE}" "${DST}" RESULT_VARIABLE DIFFERENT OUTPUT_QUIET ERROR_QUIET)
-
-	if (DIFFERENT)
-		execute_process(COMMAND ${CMAKE_COMMAND} -E rename "${SOURCE}" "${DST}")
-	else()
-		execute_process(COMMAND ${CMAKE_COMMAND} -E remove "${SOURCE}")
-	endif()
-endmacro()
-
-macro(eth_add_test NAME)
-
-	# parse arguments here
-	set(commands)
-	set(current_command "")
-	foreach (arg ${ARGN})
-		if (arg STREQUAL "ARGS")
-			if (current_command)
-				list(APPEND commands ${current_command})
-			endif()
-			set(current_command "")
-		else ()
-			set(current_command "${current_command} ${arg}")
-		endif()
-	endforeach(arg)
-	list(APPEND commands ${current_command})
-
-	message(STATUS "test: ${NAME} | ${commands}")
-
-	# create tests
-	set(index 0)
-	list(LENGTH commands count)
-	while (index LESS count)
-		list(GET commands ${index} test_arguments)
-
-		set(run_test "--run_test=${NAME}")
-		add_test(NAME "${NAME}.${index}" COMMAND testeth ${run_test} ${test_arguments})
-
-		math(EXPR index "${index} + 1")
-	endwhile(index LESS count)
-
-	# add target to run them
-	add_custom_target("test.${NAME}"
-		DEPENDS testeth
-		WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
-		COMMAND ${CMAKE_COMMAND} -DETH_TEST_NAME="${NAME}" -DCTEST_COMMAND="${CTEST_COMMAND}" -P "${ETH_SCRIPTS_DIR}/runtest.cmake"
-	)
-
-endmacro()
-
-# In Windows split repositories build we need to be checking whether or not
-# Debug/Release or both versions were built for the config phase to run smoothly
-macro(eth_check_library_link L)
-	if (${${L}_LIBRARY} AND ${${L}_LIBRARY} EQUAL "${L}_LIBRARY-NOTFOUND")
-		unset(${${L}_LIBRARY})
-	endif()
-	if (${${L}_LIBRARY_DEBUG} AND ${${L}_LIBRARY_DEBUG} EQUAL "${L}_LIBRARY_DEBUG-NOTFOUND")
-		unset(${${L}_LIBRARY_DEBUG})
-	endif()
-	if (${${L}_LIBRARY} AND ${${L}_LIBRARY_DEBUG})
-		set(${L}_LIBRARIES optimized ${${L}_LIBRARY} debug ${${L}_LIBRARY_DEBUG})
-	elseif (${${L}_LIBRARY})
-		set(${L}_LIBRARIES ${${L}_LIBRARY})
-	elseif (${${L}_LIBRARY_DEBUG})
-		set(${L}_LIBRARIES ${${L}_LIBRARY_DEBUG})
-	endif()
-endmacro()
-
diff --git a/CMakeModules/ProjectJSONRPCCPP.cmake b/CMakeModules/ProjectJSONRPCCPP.cmake
new file mode 100644
index 0000000000..38736ac4d5
--- /dev/null
+++ b/CMakeModules/ProjectJSONRPCCPP.cmake
@@ -0,0 +1,58 @@
+set(prefix "${CMAKE_BINARY_DIR}/deps")
+set(lib_path "${prefix}/lib")
+set(include_path "${prefix}/include")
+
+ExternalProject_Add(jsonrpccpp
+    PREFIX "${prefix}"
+    DOWNLOAD_NAME libjson-rpc-cpp-v1.4.1.tar.gz
+    DOWNLOAD_NO_PROGRESS TRUE
+    URL https://github.com/cinemast/libjson-rpc-cpp/archive/refs/tags/v1.4.1.tar.gz
+    URL_HASH SHA256=7a057e50d6203e4ea0a10ba5e4dbf344c48b177e5a3bf82e850eb3a783c11eb5
+    CMAKE_ARGS
+        -DCMAKE_BUILD_TYPE=Release
+        -DCMAKE_INSTALL_PREFIX=<INSTALL_DIR>
+        -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
+        -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
+        -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
+        -DCMAKE_CXX_COMPILER_LAUNCHER=${CMAKE_CXX_COMPILER_LAUNCHER}
+        -DJSONCPP_INCLUDE_DIR=${JSONCPP_INCLUDE_DIR}
+        -DBUILD_STATIC_LIBS=1
+        # disable build of parts that we don't need
+        -DBUILD_SHARED_LIBS=0
+        -DREDIS_SERVER=0
+        -DREDIS_CLIENT=0
+        -DCOMPILE_TESTS=0
+        -DCOMPILE_STUBGEN=0
+        -DCOMPILE_EXAMPLES=0
+        -DWITH_COVERAGE=0
+        -DHTTP_CLIENT=0
+    BUILD_COMMAND ${CMAKE_COMMAND} --build <BINARY_DIR> --config Release
+    INSTALL_COMMAND ${CMAKE_COMMAND} --build <BINARY_DIR> --config Release --target install
+    BUILD_BYPRODUCTS "${lib_path}"
+    DOWNLOAD_EXTRACT_TIMESTAMP NEW
+    LOG_CONFIGURE 0
+    LOG_BUILD 0
+    LOG_INSTALL 0
+)
+
+
+add_library(Jsonrpccpp-common STATIC IMPORTED)
+set_property(TARGET Jsonrpccpp-common PROPERTY IMPORTED_CONFIGURATIONS Release)
+set_property(TARGET Jsonrpccpp-common PROPERTY IMPORTED_LOCATION "${lib_path}/${CMAKE_STATIC_LIBRARY_PREFIX}jsonrpccpp-common${CMAKE_STATIC_LIBRARY_SUFFIX}")
+set_property(TARGET Jsonrpccpp-common PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${include_path}) +add_dependencies(Jsonrpccpp-common jsonrpccpp) + +add_library(Jsonrpccpp-server STATIC IMPORTED) +set_property(TARGET Jsonrpccpp-server PROPERTY IMPORTED_CONFIGURATIONS Release) +set_property(TARGET Jsonrpccpp-server PROPERTY IMPORTED_LOCATION "${lib_path}/${CMAKE_STATIC_LIBRARY_PREFIX}jsonrpccpp-server${CMAKE_STATIC_LIBRARY_SUFFIX}") +set_property(TARGET Jsonrpccpp-server PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${include_path}) +add_dependencies(Jsonrpccpp-server jsonrpccpp) + +add_library(Jsonrpccpp-client STATIC IMPORTED) +set_property(TARGET Jsonrpccpp-client PROPERTY IMPORTED_CONFIGURATIONS Release) +set_property(TARGET Jsonrpccpp-client PROPERTY IMPORTED_LOCATION "${lib_path}/${CMAKE_STATIC_LIBRARY_PREFIX}jsonrpccpp-client${CMAKE_STATIC_LIBRARY_SUFFIX}") +set_property(TARGET Jsonrpccpp-client PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${include_path}) +add_dependencies(Jsonrpccpp-client jsonrpccpp) + +add_library(Jsonrpccpp INTERFACE) +target_link_libraries(Jsonrpccpp INTERFACE Jsonrpccpp-common Jsonrpccpp-server Jsonrpccpp-client) diff --git a/CMakeModules/cppcheck.cmake b/CMakeModules/cppcheck.cmake index 9a7c8168ae..4f7a0b22bb 100644 --- a/CMakeModules/cppcheck.cmake +++ b/CMakeModules/cppcheck.cmake @@ -11,7 +11,8 @@ else () --error-exitcode=1 --enable=all --suppress=missingInclude - --suppress=useStlAlgorithm + # find_if - useless here + --suppress=useStlAlgorithm:${PROJECT_SOURCE_DIR}/*/pbft_sync_packet_handler.cpp --suppress=noExplicitConstructor --suppress=unknownMacro # false positive @@ -27,8 +28,16 @@ else () # TODO remove this when we solve correct exit of programs --suppress=localMutex:${PROJECT_SOURCE_DIR}/*/main.cpp # Just style warning + --suppress=unmatchedSuppression:${PROJECT_SOURCE_DIR}/*/final_chain.cpp + # exclude graphql generated + -i ${PROJECT_SOURCE_DIR}/libraries/core_libs/network/graphql/gen/ + # messy files + --suppress=unmatchedSuppression:${PROJECT_SOURCE_DIR}/*/vector_ref.h + --suppress=unmatchedSuppression:${PROJECT_SOURCE_DIR}/*/Common.h + --suppress=cstyleCast:${PROJECT_SOURCE_DIR}/*/vector_ref.h + --suppress=cstyleCast:${PROJECT_SOURCE_DIR}/*/Common.h + #not an issue here --suppress=virtualCallInConstructor:${PROJECT_SOURCE_DIR}/*/final_chain.cpp - # Only show found errors "--quiet" diff --git a/CMakeModules/jsonrpcstubHelper.cmake b/CMakeModules/jsonrpcstubHelper.cmake new file mode 100644 index 0000000000..66c1abe356 --- /dev/null +++ b/CMakeModules/jsonrpcstubHelper.cmake @@ -0,0 +1,25 @@ +# custom cmake scripts +set(ETH_CMAKE_DIR ${CMAKE_CURRENT_LIST_DIR}) +set(ETH_SCRIPTS_DIR ${ETH_CMAKE_DIR}/scripts) + +macro(jsonrpcstub_create EXECUTABLE SPEC SERVERNAME SERVERDIR SERVERFILENAME CLIENTNAME CLIENTDIR CLIENTFILENAME) + if (ETH_JSON_RPC_STUB) + add_custom_target(${SPEC}stub) + add_custom_command( + TARGET ${SPEC}stub + POST_BUILD + DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${SPEC}" + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + COMMAND ${CMAKE_COMMAND} -DETH_SPEC_PATH="${CMAKE_CURRENT_SOURCE_DIR}/${SPEC}" -DETH_SOURCE_DIR="${CMAKE_SOURCE_DIR}" -DETH_CMAKE_DIR="${ETH_CMAKE_DIR}" + -DETH_CLIENT_DIR="${CLIENTDIR}" + -DETH_CLIENT_NAME=${CLIENTNAME} + -DETH_CLIENT_FILENAME=${CLIENTFILENAME} + -DETH_SERVER_DIR="${SERVERDIR}" + -DETH_SERVER_NAME=${SERVERNAME} + -DETH_SERVER_FILENAME=${SERVERFILENAME} + -DETH_JSON_RPC_STUB="${ETH_JSON_RPC_STUB}" + -P "${ETH_SCRIPTS_DIR}/jsonrpcstub.cmake" + ) + add_dependencies(${EXECUTABLE} ${SPEC}stub) + endif () +endmacro() \ No 
newline at end of file diff --git a/Dockerfile b/Dockerfile index c22343e765..f7dcc07e97 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,91 +1,9 @@ -# Default output dir containing build artifacts ARG BUILD_OUTPUT_DIR=cmake-docker-build-debug -############################################# -# builder image - contains all dependencies # -############################################# -FROM ubuntu:22.04 as builder - - -# deps versions -ARG LLVM_VERSION=14 - -# Install standard packages -RUN apt-get update && DEBIAN_FRONTEND=noninteractive \ - apt-get install -y --no-install-recommends \ - tzdata \ - && apt-get install -y \ - tar \ - git \ - curl \ - wget \ - python3-pip \ - lsb-release \ - libgmp-dev \ - libmpfr-dev \ - software-properties-common \ - && rm -rf /var/lib/apt/lists/* - -# install solc for py_test if arch is not arm64 because it is not availiable -RUN \ -if [ `arch` != "aarch64" ]; \ -then \ - add-apt-repository ppa:ethereum/ethereum \ - && apt-get update \ - && apt install solc; \ -fi - -# install standart tools -RUN add-apt-repository ppa:ethereum/ethereum \ - && apt-get update \ - && apt-get install -y \ - clang-format-$LLVM_VERSION \ - clang-tidy-$LLVM_VERSION \ - llvm-$LLVM_VERSION \ - golang-go \ - ca-certificates \ - libtool \ - autoconf \ - binutils \ - cmake \ - ccache \ - # this libs are required for arm build by go part - libzstd-dev \ - libsnappy-dev \ - # replace this with conan dependency - rapidjson-dev \ - && rm -rf /var/lib/apt/lists/* - -ENV CXX="clang++-${LLVM_VERSION}" -ENV CC="clang-${LLVM_VERSION}" - -# HACK remove this when update to conan 2.0 -RUN ln -s /usr/bin/clang-${LLVM_VERSION} /usr/bin/clang -RUN ln -s /usr/bin/clang++-${LLVM_VERSION} /usr/bin/clang++ - -# Install conan -RUN pip3 install conan==1.59.0 - -ENV CONAN_REVISIONS_ENABLED=1 - -# Install conan deps -WORKDIR /opt/taraxa/ -COPY conanfile.py . - -RUN conan remote add -f bincrafters "https://bincrafters.jfrog.io/artifactory/api/conan/public-conan" \ - && conan profile new clang --detect \ - && conan profile update settings.compiler=clang clang \ - && conan profile update settings.compiler.version=$LLVM_VERSION clang \ - && conan profile update settings.compiler.libcxx=libstdc++11 clang \ - && conan profile update settings.build_type=RelWithDebInfo clang \ - && conan profile update env.CC=clang-$LLVM_VERSION clang \ - && conan profile update env.CXX=clang++-$LLVM_VERSION clang \ - && conan install --build missing -pr:b=clang . 
-
###################################################################
# Build stage - use builder image for actual build of taraxa node #
###################################################################
-FROM builder as build
+FROM taraxa/taraxa-builder:v0.1.0 as build

# Default output dir containing build artifacts
ARG BUILD_OUTPUT_DIR
@@ -98,8 +16,9 @@ RUN mkdir $BUILD_OUTPUT_DIR && cd $BUILD_OUTPUT_DIR \
    && cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo \
        -DTARAXA_ENABLE_LTO=OFF \
        -DTARAXA_STATIC_BUILD=OFF \
-        ../ \
-    && make -j$(nproc) all \
+        ../
+
+RUN cd $BUILD_OUTPUT_DIR && make -j$(nproc) all \
    # Copy CMake generated Testfile to be able to trigger ctest from bin directory
    && cp tests/CTestTestfile.cmake bin/ \
    # keep only required shared libraries and final binaries
diff --git a/builder.Dockerfile b/builder.Dockerfile
new file mode 100644
index 0000000000..1ecf5c8458
--- /dev/null
+++ b/builder.Dockerfile
@@ -0,0 +1,81 @@
+#############################################
+# builder image - contains all dependencies #
+#############################################
+FROM ubuntu:22.04
+
+# deps versions
+ARG LLVM_VERSION=14
+
+# Install standard packages
+RUN apt-get update && DEBIAN_FRONTEND=noninteractive \
+    apt-get install -y --no-install-recommends \
+    tzdata \
+    && apt-get install -y \
+    tar \
+    git \
+    curl \
+    wget \
+    python3-pip \
+    lsb-release \
+    libgmp-dev \
+    libmpfr-dev \
+    libmicrohttpd-dev \
+    software-properties-common \
+    && rm -rf /var/lib/apt/lists/*
+
+# install solc for py_test if arch is not arm64 because it is not available
+RUN \
+if [ `arch` != "aarch64" ]; \
+then \
+    add-apt-repository ppa:ethereum/ethereum \
+    && apt-get update \
+    && apt install solc; \
+fi
+
+# install standard tools
+RUN add-apt-repository ppa:ethereum/ethereum \
+    && apt-get update \
+    && apt-get install -y \
+    clang-format-$LLVM_VERSION \
+    clang-tidy-$LLVM_VERSION \
+    llvm-$LLVM_VERSION \
+    golang-go \
+    ca-certificates \
+    libtool \
+    autoconf \
+    binutils \
+    cmake \
+    ccache \
+    # these libs are required for arm build by go part
+    libzstd-dev \
+    libsnappy-dev \
+    # replace this with conan dependency
+    rapidjson-dev \
+    && rm -rf /var/lib/apt/lists/*
+
+ENV CXX="clang++-${LLVM_VERSION}"
+ENV CC="clang-${LLVM_VERSION}"
+
+# HACK remove this when update to conan 2.0
+RUN ln -s /usr/bin/clang-${LLVM_VERSION} /usr/bin/clang
+RUN ln -s /usr/bin/clang++-${LLVM_VERSION} /usr/bin/clang++
+
+# Install conan
+RUN pip3 install conan==1.59.0
+
+ENV CONAN_REVISIONS_ENABLED=1
+
+# Install conan deps
+WORKDIR /opt/taraxa/
+COPY conanfile.py .
+
+RUN conan profile new clang --detect \
+    && conan profile update settings.compiler=clang clang \
+    && conan profile update settings.compiler.version=$LLVM_VERSION clang \
+    && conan profile update settings.compiler.libcxx=libstdc++11 clang \
+    && conan profile update settings.build_type=RelWithDebInfo clang \
+    && conan profile update env.CC=clang-$LLVM_VERSION clang \
+    && conan profile update env.CXX=clang++-$LLVM_VERSION clang \
+    && conan install --build missing -pr=clang .
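With the dependency layer extracted into builder.Dockerfile, the main Dockerfile starts from the published builder image instead of rebuilding the Conan packages on every run. A minimal sketch of the resulting local workflow, assuming `taraxa/taraxa-builder:v0.1.0` has been pushed to a registry the local daemon can reach:

    # Pre-pull the dependency image; node builds are then just compile + package.
    docker pull taraxa/taraxa-builder:v0.1.0
    # Intermediate image with binaries and ctest files ("Compile and build binaries" in CI).
    docker build --target build -t taraxa-node-ctest:local .
    # Final runtime image.
    docker build -t taraxa-node:local .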
+
+
diff --git a/charts/taraxa-node/.gitignore b/charts/taraxa-node/.gitignore
new file mode 100644
index 0000000000..7d251af7f5
--- /dev/null
+++ b/charts/taraxa-node/.gitignore
@@ -0,0 +1,6 @@
+# do not include into git chart dependencies
+charts/*.tgz
+
+# Helm stuff
+requirements.lock
+Chart.lock
\ No newline at end of file
diff --git a/charts/taraxa-node/CHANGELOG.md b/charts/taraxa-node/CHANGELOG.md
index d5896d3506..f923c637a9 100644
--- a/charts/taraxa-node/CHANGELOG.md
+++ b/charts/taraxa-node/CHANGELOG.md
@@ -3,6 +3,53 @@ This file documents all notable changes to `taraxa-node` Helm Chart. The release
numbering uses [semantic versioning](http://semver.org).

+## v0.3.9
+
+### Major changes
+
+* Rename / restructure manifest files
+* Added light nodes
+
+### Minor changes
+
+* Removed "@channel" from slack notifications
+
+## v0.3.8
+
+### Minor changes
+
+* Added port for scraping metrics from sidecar of rpc-nodes ([taraxa-indexer](https://github.com/Taraxa-project/taraxa-indexer))
+
+## v0.3.7
+
+### Minor changes
+
+* Added transaction-generating service to replace the explorer faucet
+
+## v0.3.6
+
+### Minor changes
+
+* Added labels into `StatefulSets` for [kube-monkey](https://github.com/asobti/kube-monkey)
+
+## v0.3.5
+
+### Minor changes
+
+* Changed `db_path` to `data_dir` for taraxa-indexer
+
+## v0.3.4
+
+### Minor changes
+
+* Enabled CORS on `Ingress` of indexer
+
+## v0.3.3
+
+### Major changes
+
+* Added sidecar container to RPC nodes with [taraxa-indexer](https://github.com/Taraxa-project/taraxa-indexer)
+
## v0.3.2

### Minor changes
@@ -21,6 +68,13 @@ numbering uses [semantic versioning](http://semver.org).

* Separate config for genesis

+## v0.2.5
+
+### Minor changes
+
+* Allow for different images in `StatefulSet`s for boot, rpc and consensus nodes
+
+
## v0.2.4

### Minor changes
diff --git a/charts/taraxa-node/Chart.yaml b/charts/taraxa-node/Chart.yaml
index d0a045aaa9..bd384e984d 100644
--- a/charts/taraxa-node/Chart.yaml
+++ b/charts/taraxa-node/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
appVersion: "1.0"
description: Kubernetes helm chart for Taraxa blockchain full node implementation.
name: taraxa-node
-version: 0.3.2
+version: 0.3.10
keywords:
  - blockchain
  - taraxa
diff --git a/charts/taraxa-node/templates/_helpers.tpl b/charts/taraxa-node/templates/_helpers.tpl
index a9855a8bc1..02d1c805b1 100644
--- a/charts/taraxa-node/templates/_helpers.tpl
+++ b/charts/taraxa-node/templates/_helpers.tpl
@@ -87,6 +87,19 @@ If release name contains chart name it will be used as a full name.
{{- end -}}
{{- end -}}

+{{/*
+Create a default fully qualified indexer name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "taraxa-node.indexerName" -}}
+{{- if .Values.indexerNameOverride -}}
+{{- .Values.indexerNameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s.%s" "indexer" .Release.Name .Values.domain | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
{{/*
Create a default fully qualified graphql websocket.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
diff --git a/charts/taraxa-node/templates/boot-nodes-services-loadbalancer.yaml b/charts/taraxa-node/templates/boot-nodes-services-loadbalancer.yaml deleted file mode 100644 index ed6379ba82..0000000000 --- a/charts/taraxa-node/templates/boot-nodes-services-loadbalancer.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{ if .Values.bootnode.enabled }} -{{ if .Values.bootnode.loadBalancer.enabled }} -{{- range $key, $value := .Values.bootnode.loadBalancer.addresses }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ $.Release.Name }}-boot-node-udp-{{ $key }} - {{- with $.Values.bootnode.loadBalancer.serviceAnnotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} - labels: - name: {{ $.Release.Name }}-boot-node-udp-{{ $key }} -spec: - type: LoadBalancer - loadBalancerIP: {{ $value | quote }} - externalTrafficPolicy: Local - selector: - statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-boot-node-{{ $key }} - ports: - - name: udp-listen-port - port: 10002 - targetPort: 10002 - protocol: UDP -{{- end }} -{{- end }} -{{- end }} diff --git a/charts/taraxa-node/templates/initconfig-boot-node.yaml b/charts/taraxa-node/templates/bootnode-configmap.yaml similarity index 100% rename from charts/taraxa-node/templates/initconfig-boot-node.yaml rename to charts/taraxa-node/templates/bootnode-configmap.yaml diff --git a/charts/taraxa-node/templates/bootnode-service.yaml b/charts/taraxa-node/templates/bootnode-service.yaml new file mode 100644 index 0000000000..5e2f2c52fd --- /dev/null +++ b/charts/taraxa-node/templates/bootnode-service.yaml @@ -0,0 +1,59 @@ +{{ if .Values.bootnode.enabled }} +{{- if .Values.bootnode.service.ports }} +# Note: This is a headless service +apiVersion: v1 +kind: Service +metadata: + name: {{ include "taraxa-boot-node.fullname" . }} + labels: + name: boot-node + app.kubernetes.io/name: boot-node + helm.sh/chart: {{ include "taraxa-node.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + selector: + app: boot-node + app.kubernetes.io/name: {{ .Release.Name }}-boot-node + app.kubernetes.io/instance: {{ .Release.Name }} + ports: + {{- range $port := .Values.bootnode.service.ports }} + - name: {{ $port.name | default $port.port }} + port: {{ $port.port }} + targetPort: {{ $port.targetPort | default $port.port }} + {{- if $port.protocol }} + protocol: {{ $port.protocol }} + {{- end }} + {{- end }} +{{- end }} + +{{ if .Values.bootnode.loadBalancer.enabled }} +{{- range $key, $value := .Values.bootnode.loadBalancer.addresses }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ $.Release.Name }}-boot-node-udp-{{ $key }} + {{- with $.Values.bootnode.loadBalancer.serviceAnnotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} + labels: + name: {{ $.Release.Name }}-boot-node-udp-{{ $key }} +spec: + type: LoadBalancer + loadBalancerIP: {{ $value | quote }} + externalTrafficPolicy: Local + selector: + statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-boot-node-{{ $key }} + ports: + - name: udp-listen-port + port: 10002 + targetPort: 10002 + protocol: UDP +{{- end }} +{{- end }} + +{{- end }} diff --git a/charts/taraxa-node/templates/boot-node-servicemonitor.yaml b/charts/taraxa-node/templates/bootnode-servicemonitor.yaml similarity index 100% rename from charts/taraxa-node/templates/boot-node-servicemonitor.yaml rename to charts/taraxa-node/templates/bootnode-servicemonitor.yaml diff --git a/charts/taraxa-node/templates/boot-node.yaml b/charts/taraxa-node/templates/bootnode-statefulset.yaml similarity index 76% rename from charts/taraxa-node/templates/boot-node.yaml rename to charts/taraxa-node/templates/bootnode-statefulset.yaml index f73004d58b..0b848666ec 100644 --- a/charts/taraxa-node/templates/boot-node.yaml +++ b/charts/taraxa-node/templates/bootnode-statefulset.yaml @@ -9,6 +9,13 @@ metadata: helm.sh/chart: {{ include "taraxa-node.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: {{ include "taraxa-boot-node.fullname" . }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} + {{ end }} spec: replicas: {{ .Values.bootnode.replicaCount }} serviceName: {{ include "taraxa-boot-node.fullname" . }} @@ -29,13 +36,29 @@ spec: partition: a app.kubernetes.io/name: {{ .Release.Name }}-boot-node app.kubernetes.io/instance: {{ .Release.Name }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: {{ include "taraxa-boot-node.fullname" . }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} + {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." 
spec: initContainers: - name: config-adapter + {{- if and .Values.bootnode.image.repository .Values.bootnode.image.tag }} + image: "{{ .Values.bootnode.image.repository }}:{{ .Values.bootnode.image.tag }}" + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + + {{- if .Values.bootnode.image.pullPolicy }} + imagePullPolicy: {{ .Values.bootnode.image.pullPolicy }} + {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end }} envFrom: - secretRef: name: {{ .Release.Name }} @@ -54,8 +77,17 @@ spec: mountPath: /root/.taraxa containers: - name: boot-node + {{- if and .Values.bootnode.image.repository .Values.bootnode.image.tag }} + image: "{{ .Values.bootnode.image.repository }}:{{ .Values.bootnode.image.tag }}" + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + + {{- if .Values.bootnode.image.pullPolicy }} + imagePullPolicy: {{ .Values.bootnode.image.pullPolicy }} + {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end }} args: {{- toYaml .Values.bootnode.args | nindent 12 }} - --chain-id diff --git a/charts/taraxa-node/templates/initconfig-consensus-node.yaml b/charts/taraxa-node/templates/consensus-node-configmap.yaml similarity index 99% rename from charts/taraxa-node/templates/initconfig-consensus-node.yaml rename to charts/taraxa-node/templates/consensus-node-configmap.yaml index a751e901a8..ba7b05621f 100644 --- a/charts/taraxa-node/templates/initconfig-consensus-node.yaml +++ b/charts/taraxa-node/templates/consensus-node-configmap.yaml @@ -99,7 +99,6 @@ data: echo "Cleaning up old config..." rm -rf $CONFIG_PATH - rm -rf $GENESIS_PATH echo "Generating config" INDEX=${HOSTNAME##*-} diff --git a/charts/taraxa-node/templates/consensus-node-light-configmap.yaml b/charts/taraxa-node/templates/consensus-node-light-configmap.yaml new file mode 100644 index 0000000000..7d4adc594c --- /dev/null +++ b/charts/taraxa-node/templates/consensus-node-light-configmap.yaml @@ -0,0 +1,177 @@ +{{ if .Values.consensusnodeLight.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-consensus-node-light-init-script + labels: + app: consensus-node-light + app.kubernetes.io/name: consensus-node-light + helm.sh/chart: {{ include "taraxa-node.chart" . 
}}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+data:
+  genconfig.py: |-
+    import json
+    import sys
+    import subprocess
+
+    def get_vrf_public(vrf_prv_key):
+        process = subprocess.Popen(['taraxad', '--command', 'vrf', vrf_prv_key],stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+        for line in process.stdout:
+            l = line.decode("utf-8")
+
+            if "vrf_public" in l:
+                vrf_public = l.split(':')[1].replace("\"", "").strip()
+                return f'0x{vrf_public}'
+
+    def get_addr(prv_key):
+        process = subprocess.Popen(['taraxad', '--command', 'account', prv_key],stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+        for line in process.stdout:
+            l = line.decode("utf-8")
+
+            if "node_address" in l:
+                addr = l.split(':')[1].replace("\"", "").strip()
+                return f'0x{addr}'
+
+    def main(config):
+        keys = []
+        vrfs = []
+
+        {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }}
+        keys = [
+        {{- range $key, $value := .Values.config.consensusnode.keys }}
+            "{{ $value }}",
+        {{- end }}
+        ]
+        {{- end }}
+
+        {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }}
+        vrfs = [
+        {{- range $key, $value := .Values.config.consensusnode.vrfs }}
+            "{{ $value }}",
+        {{- end }}
+        ]
+        {{- end }}
+
+        with open(config) as f:
+            data = json.load(f)
+
+        initial_validators = data['dpos']['initial_validators']
+
+        # get delegations from 1st default validator
+        delegations = initial_validators[0]['delegations']
+
+        validators = []
+        for idx, key in enumerate(keys):
+
+            validator = {
+                'address': '',
+                'commission': '0x0',
+                'delegations': {},
+                'description': 'Taraxa validator',
+                'endpoint': '',
+                'owner': ''
+            }
+
+            addr = get_addr(key)
+            validator['address'] = addr
+            validator['owner'] = addr
+            validator['delegations'] = delegations
+            validator['vrf_key'] = get_vrf_public(vrfs[idx])
+            validators.append(validator)
+
+        data['dpos']['initial_validators'] = validators
+        print(json.dumps(data))
+
+    if __name__ == "__main__":
+        config_file_name = sys.argv[1]
+        main(config_file_name)
+
+  entrypoint.sh: |-
+    #!/bin/bash
+    DATA_PATH=/root/.taraxa
+    CONFIG_PATH=$DATA_PATH/conf_taraxa.json
+    GENESIS_PATH=$DATA_PATH/genesis_taraxa.json
+    WALLET_PATH=$DATA_PATH/wallet_taraxa.json
+
+    echo "Cleaning up old config..."
+    rm -rf $CONFIG_PATH
+
+    echo "Generating config"
+    INDEX=${HOSTNAME##*-}
+    KEY="CONSENSUS_NODE_LIGHT_KEY_${INDEX}"
+    VRF="CONSENSUS_NODE_LIGHT_VRF_${INDEX}"
+
+    KEY="${!KEY}"
+    VRF="${!VRF}"
+
+    if [ -z "$KEY" ]
+    then
+      if [ ! -f "$WALLET_PATH" ]
+      then
+        echo "No predefined keys. Generating new wallet..."
+        KEY=$(taraxad --command account | grep node_secret | cut -d\ -f3- | tr -d \")
+        VRF=$(taraxad --command vrf | grep vrf_secret | cut -d\ -f3 | tr -d \")
+        {{ if .Values.explorer.enabled }}
+        NODE_ADDRESS=$(taraxad --command account ${KEY} | grep node_address | cut -d\ -f3 | tr -d \")
+        echo "New wallet: 0x${NODE_ADDRESS}"
+
+        SIG=$(taraxa-sign sign --key 0x${EXPLORER_DELEGATION_PRIVATE_KEY} 0x${NODE_ADDRESS})
+
+        curl --silent http://{{ .Release.Name }}-explorer/api/delegate/0x${NODE_ADDRESS}?sig=${SIG}
+        {{- end }}
+      else
+        echo "Found wallet file."
+ KEY=$(cat "$WALLET_PATH" | jq -r .node_secret) + VRF=$(cat "$WALLET_PATH" | jq -r .vrf_secret) + fi + fi + + {{ if .Values.explorer.enabled }} + {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }} + NODE_ADDRESS=$(taraxad --command account ${KEY} | grep node_address | cut -d\ -f3 | tr -d \") + curl --silent http://{{ .Release.Name }}-explorer/api/faucet/0x${NODE_ADDRESS} + {{- end }} + {{- end }} + + taraxad --command config \ + --chain-id {{ .Values.config.network }} \ + --node-secret ${KEY} \ + --vrf-secret ${VRF} \ + {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }} + {{- $lbEnabled := .Values.bootnode.loadBalancer.enabled -}} + {{- $lbIPs := .Values.bootnode.loadBalancer.addresses -}} + {{- range $key, $value := .Values.config.bootnode.addresses }} + --boot-nodes {{ if $lbEnabled }}{{ index $lbIPs $key }}{{- else }}{{ include "taraxa-boot-node.fullname" $ }}-{{ $key }}.{{ include "taraxa-boot-node.fullname" $ }}.{{$.Release.Namespace}}{{- end }}:10002/{{ $value }} \ + {{- end }} + {{- end }} + {{- if .Values.config.extraArgs }} + {{ join " " .Values.config.extraArgs }} \ + {{- end }} + --config $CONFIG_PATH \ + --genesis $GENESIS_PATH \ + --wallet $WALLET_PATH \ + --data-dir $DATA_PATH + + {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }} + python3 /bin/genconfig.py $GENESIS_PATH > $GENESIS_PATH.tmp && mv $GENESIS_PATH.tmp $GENESIS_PATH + {{- end }} + + {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }} + {{ if .Values.explorer.enabled }} + export FAUCET_ADDRESS=$(taraxad --command account ${EXPLORER_FAUCET_PRIVATE_KEY} | grep node_address | cut -d\ -f3 | tr -d \") + cat $GENESIS_PATH | jq '.initial_balances += ({("0x"+env.FAUCET_ADDRESS): "0x1027e72f1f12813088000000"})' > $GENESIS_PATH.tmp && mv $GENESIS_PATH.tmp $GENESIS_PATH + {{- end }} + {{- end }} + + echo "***** $CONFIG_PATH *****" + cat $CONFIG_PATH + echo "***** $CONFIG_PATH *****" + + echo "***** $GENESIS_PATH *****" + cat $GENESIS_PATH + echo "***** $GENESIS_PATH *****" +{{- end }} diff --git a/charts/taraxa-node/templates/boot-node-service.yaml b/charts/taraxa-node/templates/consensus-node-light-service.yaml similarity index 61% rename from charts/taraxa-node/templates/boot-node-service.yaml rename to charts/taraxa-node/templates/consensus-node-light-service.yaml index 0f6f1ab71f..724a7e02df 100644 --- a/charts/taraxa-node/templates/boot-node-service.yaml +++ b/charts/taraxa-node/templates/consensus-node-light-service.yaml @@ -1,13 +1,13 @@ -{{ if .Values.bootnode.enabled }} -{{- if .Values.bootnode.service.ports }} +{{ if .Values.consensusnodeLight.enabled }} +{{- if .Values.consensusnodeLight.service.ports }} # Note: This is a headless service apiVersion: v1 kind: Service metadata: - name: {{ include "taraxa-boot-node.fullname" . }} + name: {{ include "taraxa-consensus-node.fullname" . }}-light labels: - name: boot-node - app.kubernetes.io/name: boot-node + name: consensus-node-light + app.kubernetes.io/name: consensus-node-light helm.sh/chart: {{ include "taraxa-node.chart" . 
}}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
@@ -15,11 +15,11 @@ spec:
  type: ClusterIP
  clusterIP: None
  selector:
-    app: boot-node
-    app.kubernetes.io/name: {{ .Release.Name }}-boot-node
+    app: consensus-node-light
+    app.kubernetes.io/name: {{ .Release.Name }}-consensus-node-light
    app.kubernetes.io/instance: {{ .Release.Name }}
  ports:
-    {{- range $port := .Values.bootnode.service.ports }}
+    {{- range $port := .Values.consensusnodeLight.service.ports }}
    - name: {{ $port.name | default $port.port }}
      port: {{ $port.port }}
      targetPort: {{ $port.targetPort | default $port.port }}
diff --git a/charts/taraxa-node/templates/consensus-node-light-servicemonitor.yaml b/charts/taraxa-node/templates/consensus-node-light-servicemonitor.yaml
new file mode 100644
index 0000000000..730f84f570
--- /dev/null
+++ b/charts/taraxa-node/templates/consensus-node-light-servicemonitor.yaml
@@ -0,0 +1,26 @@
+{{ if .Values.consensusnodeLight.enabled }}
+{{- if .Values.consensusnodeLight.serviceMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ include "taraxa-consensus-node.fullname" . }}-light
+  labels:
+    name: consensus-node-light
+    app.kubernetes.io/name: consensus-node-light
+    helm.sh/chart: {{ include "taraxa-node.chart" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: consensus-node-light
+      app.kubernetes.io/instance: {{ .Release.Name }}
+  namespaceSelector:
+    matchNames:
+    - {{ $.Release.Namespace | quote }}
+  endpoints:
+  - honorLabels: true
+    path: /metrics
+    port: metrics
+{{- end }}
+{{- end }}
diff --git a/charts/taraxa-node/templates/consensus-node-light-statefulset.yaml b/charts/taraxa-node/templates/consensus-node-light-statefulset.yaml
new file mode 100644
index 0000000000..91fcda21a2
--- /dev/null
+++ b/charts/taraxa-node/templates/consensus-node-light-statefulset.yaml
@@ -0,0 +1,235 @@
+{{ if .Values.consensusnodeLight.enabled }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{ include "taraxa-consensus-node.fullname" . }}-light
+  labels:
+    app: consensus-node-light
+    app.kubernetes.io/name: consensus-node-light
+    helm.sh/chart: {{ include "taraxa-node.chart" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    {{ if .Values.kubemonkey.enabled }}
+    kube-monkey/enabled: enabled
+    kube-monkey/identifier: {{ include "taraxa-consensus-node.fullname" . }}-light
+    kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }}
+    kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }}
+    kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }}
+    {{ end }}
+spec:
+  replicas: {{ .Values.consensusnodeLight.replicaCount }}
+  serviceName: {{ include "taraxa-consensus-node.fullname" . }}-light
+  # to launch or terminate all Pods in parallel.
+ # https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#parallel-pod-management + podManagementPolicy: Parallel + selector: + matchLabels: + app: consensus-node-light + partition: a + app.kubernetes.io/name: {{ .Release.Name }}-consensus-node-light + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + name: consensus-node-light + labels: + app: consensus-node-light + partition: a + app.kubernetes.io/name: {{ .Release.Name }}-consensus-node-light + app.kubernetes.io/instance: {{ .Release.Name }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: {{ include "taraxa-consensus-node.fullname" . }}-light + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} + {{ end }} + annotations: + kubernetes.io/change-cause: "Configuration through configmaps." + spec: + initContainers: + {{ if .Values.explorer.enabled }} + - name: wait-for-explorer + image: dwdraju/alpine-curl-jq:latest + command: ["/bin/entrypoint.sh"] + volumeMounts: + - name: explorer-check + mountPath: /bin/entrypoint.sh + readOnly: true + subPath: entrypoint.sh + {{- end }} + - name: config-adapter + {{- if and .Values.consensusnodeLight.image.repository .Values.consensusnodeLight.image.tag }} + image: "{{ .Values.consensusnodeLight.image.repository }}:{{ .Values.consensusnodeLight.image.tag }}" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + {{- if .Values.consensusnodeLight.image.pullPolicy }} + imagePullPolicy: {{ .Values.consensusnodeLight.image.pullPolicy }} + {{- else }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end }} + envFrom: + - secretRef: + name: {{ .Release.Name }} + env: + - name: HOST + valueFrom: + fieldRef: + fieldPath: status.podIP + command: ["/bin/entrypoint.sh"] + volumeMounts: + - name: initconfig + mountPath: /bin/entrypoint.sh + readOnly: true + subPath: entrypoint.sh + - name: initconfig + mountPath: /bin/genconfig.py + readOnly: true + subPath: genconfig.py + - name: data + mountPath: /root/.taraxa + containers: + {{- if .Values.slack.enabled }} + - name: status + image: "python:3.8" + imagePullPolicy: IfNotPresent + env: + - name: SLACK_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Release.Name }} + key: SLACK_TOKEN + - name: SLACK_CHANNEL + value: {{ .Values.slack.channel }} + - name: K8S_CLUSTER + value: {{ .Values.slack.k8s_cluster }} + command: ["/bin/bash", "-c", "--"] + args: [ "pip install -r /app/requirements.txt && python /app/status.py" ] + volumeMounts: + - name: status-requirements + mountPath: /app/requirements.txt + readOnly: true + subPath: requirements.txt + - name: status-script + mountPath: /app/status.py + readOnly: true + subPath: status.py + {{- end }} + - name: consensus-node-light + {{- if and .Values.consensusnodeLight.image.repository .Values.consensusnodeLight.image.tag }} + image: "{{ .Values.consensusnodeLight.image.repository }}:{{ .Values.consensusnodeLight.image.tag }}" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + {{- if .Values.consensusnodeLight.image.pullPolicy }} + imagePullPolicy: {{ .Values.consensusnodeLight.image.pullPolicy }} + {{- else }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end }} + args: + {{- toYaml .Values.consensusnodeLight.args | nindent 12 }} + env: + - name: DEBUG + value: "{{ .Values.consensusnodeLight.debug 
}}" + - name: HOST + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if not .Values.consensusnodeLight.probes.enabled }} + - name: TARAXA_SLEEP_DIAGNOSE + value: "true" + {{- end }} + ports: + {{- toYaml .Values.consensusnodeLight.ports | nindent 12 }} + {{- if .Values.consensusnodeLight.probes.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + - "ps -A | grep taraxad" + initialDelaySeconds: 10 + periodSeconds: 5 + readinessProbe: + exec: + command: + - curl + - -X + - POST + - -H + - "'Content-Type: application/json'" + - -d + - "'{\"jsonrpc\":\"2.0\",\"method\":\"taraxa_protocolVersion\",\"params\": [],\"id\":1}'" + - http://127.0.0.1:7777 + initialDelaySeconds: 10 + periodSeconds: 5 + {{- end }} + resources: + {{- toYaml .Values.consensusnodeLight.resources | nindent 12 }} + volumeMounts: + - name: data + mountPath: /root/.taraxa + securityContext: + capabilities: + add: + - SYS_PTRACE + {{- with .Values.consensusnodeLight.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: initconfig + configMap: + defaultMode: 0700 + name: {{ .Release.Name }}-consensus-node-light-init-script + {{ if .Values.explorer.enabled }} + - name: explorer-check + configMap: + defaultMode: 0700 + name: {{ include "taraxa-node.fullname" . }}-explorer-check + {{- end }} + {{- if .Values.slack.enabled }} + - name: status-requirements + configMap: + defaultMode: 0700 + name: {{ .Release.Name }}-node-status-script + - name: status-script + configMap: + defaultMode: 0700 + name: {{ .Release.Name }}-node-status-script + {{- end }} + {{- if not .Values.consensusnodeLight.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.consensusnodeLight.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + annotations: + {{- if .Values.consensusnodeLight.persistence.annotations}} + {{- toYaml .Values.consensusnodeLight.persistence.annotations | nindent 4 }} + {{- end }} + spec: + accessModes: + - {{ .Values.consensusnodeLight.persistence.accessMode | quote }} + {{- if .Values.consensusnodeLight.persistence.storageClass }} + {{- if (eq "-" .Values.consensusnodeLight.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.consensusnodeLight.persistence.storageClass }}" + {{- end }} + {{- end }} + resources: + requests: + storage: "{{ .Values.consensusnodeLight.persistence.size }}" + {{- end }} +{{- end }} diff --git a/charts/taraxa-node/templates/consensus-node.yaml b/charts/taraxa-node/templates/consensus-node-statefulset.yaml similarity index 81% rename from charts/taraxa-node/templates/consensus-node.yaml rename to charts/taraxa-node/templates/consensus-node-statefulset.yaml index df2608e935..920a688eb0 100644 --- a/charts/taraxa-node/templates/consensus-node.yaml +++ b/charts/taraxa-node/templates/consensus-node-statefulset.yaml @@ -9,6 +9,13 @@ metadata: helm.sh/chart: {{ include "taraxa-node.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: {{ include "taraxa-consensus-node.fullname" . 
}} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} + {{ end }} spec: replicas: {{ .Values.consensusnode.replicaCount }} serviceName: {{ include "taraxa-consensus-node.fullname" . }} @@ -29,6 +36,13 @@ spec: partition: a app.kubernetes.io/name: {{ .Release.Name }}-consensus-node app.kubernetes.io/instance: {{ .Release.Name }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: {{ include "taraxa-consensus-node.fullname" . }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} + {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." spec: @@ -44,8 +58,16 @@ spec: subPath: entrypoint.sh {{- end }} - name: config-adapter + {{- if and .Values.consensusnode.image.repository .Values.consensusnode.image.tag }} + image: "{{ .Values.consensusnode.image.repository }}:{{ .Values.consensusnode.image.tag }}" + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + {{- if .Values.consensusnode.image.pullPolicy }} + imagePullPolicy: {{ .Values.consensusnode.image.pullPolicy }} + {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end }} envFrom: - secretRef: name: {{ .Release.Name }} @@ -94,8 +116,16 @@ spec: subPath: status.py {{- end }} - name: consensus-node + {{- if and .Values.consensusnode.image.repository .Values.consensusnode.image.tag }} + image: "{{ .Values.consensusnode.image.repository }}:{{ .Values.consensusnode.image.tag }}" + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + {{- if .Values.consensusnode.image.pullPolicy }} + imagePullPolicy: {{ .Values.consensusnode.image.pullPolicy }} + {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end }} args: {{- toYaml .Values.consensusnode.args | nindent 12 }} env: diff --git a/charts/taraxa-node/templates/explorer-check.yaml b/charts/taraxa-node/templates/explorer-check-configmap.yaml similarity index 100% rename from charts/taraxa-node/templates/explorer-check.yaml rename to charts/taraxa-node/templates/explorer-check-configmap.yaml diff --git a/charts/taraxa-node/templates/port-check.yaml b/charts/taraxa-node/templates/port-check.yaml deleted file mode 100644 index 2a754fa05b..0000000000 --- a/charts/taraxa-node/templates/port-check.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- $fullName := include "taraxa-node.fullname" . -}} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ $fullName }}-port-check - labels: - helm.sh/chart: {{ include "taraxa-node.chart" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -data: - entrypoint.sh: |- - #!/bin/sh - set -e - echo "Checking host $1" - while [ $(nc -z -w5 $1 $2 > /dev/null 2>&1; echo $?) -ne 0 ]; do - sleep 5 - echo "Waiting for $1:$2..." 
- done diff --git a/charts/taraxa-node/templates/secrets.yaml b/charts/taraxa-node/templates/secrets.yaml index aa7631a87f..7d2f50167b 100644 --- a/charts/taraxa-node/templates/secrets.yaml +++ b/charts/taraxa-node/templates/secrets.yaml @@ -27,6 +27,12 @@ data: {{- range $key, $value := .Values.config.consensusnode.vrfs }} CONSENSUS_NODE_VRF_{{ $key }}: {{ $value | b64enc | quote }} {{- end }} + {{- range $key, $value := .Values.config.consensusnodeLight.keys }} + CONSENSUS_NODE_LIGHT_KEY_{{ $key }}: {{ $value | b64enc | quote }} + {{- end }} + {{- range $key, $value := .Values.config.consensusnodeLight.vrfs }} + CONSENSUS_NODE_LIGHT_VRF_{{ $key }}: {{ $value | b64enc | quote }} + {{- end }} {{- range $key, $value := .Values.config.bootnode.keys }} BOOT_NODE_KEY_{{ $key }}: {{ $value | b64enc | quote }} {{- end }} @@ -36,5 +42,6 @@ data: SLACK_TOKEN: {{ .Values.slack.token | b64enc | quote }} EXPLORER_DELEGATION_PRIVATE_KEY: {{ .Values.config.consensusnode.explorerDelegationPrivateKey | b64enc | quote }} EXPLORER_FAUCET_PRIVATE_KEY: {{ .Values.explorer.faucet.privKey | b64enc | quote }} + TRANSACTION_GENERATION_PRIVATE_KEY: {{ .Values.transactionGeneration.privateKey | b64enc | quote }} {{- end }} {{- end }} diff --git a/charts/taraxa-node/templates/node-status-script.yaml b/charts/taraxa-node/templates/status-script-configmap.yaml similarity index 93% rename from charts/taraxa-node/templates/node-status-script.yaml rename to charts/taraxa-node/templates/status-script-configmap.yaml index 185546ce2a..01fa04ac0c 100644 --- a/charts/taraxa-node/templates/node-status-script.yaml +++ b/charts/taraxa-node/templates/status-script-configmap.yaml @@ -118,8 +118,8 @@ data: switcher = { "UP": ":white_check_mark: {} ({}) node is up and running :white_check_mark:".format(HOSTNAME, K8S_CLUSTER), - "DOWN": "@channel :fire: {} ({}) node is down (RPC not responding) :fire:".format(HOSTNAME, K8S_CLUSTER), - "DOWN_NP": "@channel :fire: {} ({}) node is down (no network progress). Last block is {} :fire:".format(HOSTNAME, K8S_CLUSTER, last_block), + "DOWN": ":fire: {} ({}) node is down (RPC not responding) :fire:".format(HOSTNAME, K8S_CLUSTER), + "DOWN_NP": ":fire: {} ({}) node is down (no network progress). Last block is {} :fire:".format(HOSTNAME, K8S_CLUSTER, last_block), } message = switcher.get(current_status) diff --git a/charts/taraxa-node/templates/initconfig-node.yaml b/charts/taraxa-node/templates/taraxa-node-configmap.yaml similarity index 100% rename from charts/taraxa-node/templates/initconfig-node.yaml rename to charts/taraxa-node/templates/taraxa-node-configmap.yaml diff --git a/charts/taraxa-node/templates/taraxa-node-ingress.yaml b/charts/taraxa-node/templates/taraxa-node-ingress.yaml index 927e068001..e42f1f868b 100644 --- a/charts/taraxa-node/templates/taraxa-node-ingress.yaml +++ b/charts/taraxa-node/templates/taraxa-node-ingress.yaml @@ -1,5 +1,6 @@ {{ if .Values.node.enabled }} {{- if .Values.node.ingress.enabled -}} + {{- $fullName := include "taraxa-node.fullname" . -}} {{- $apiIsStable := eq (include "taraxa-node.ingress.isStable" .) "true" -}} {{- $ingressSupportsPathType := eq (include "taraxa-node.ingress.supportsPathType" .) 
"true" -}} @@ -9,11 +10,14 @@ {{- $servicePortRpcWs := 8777 -}} {{- $servicePortGraphQl := 9777 -}} {{- $servicePortGraphQlWs := 6777 -}} +{{- $servicePortHttp := 8080 -}} + {{- range .Values.node.service.ports }} {{ if eq .name "rest"}} {{ $servicePortRpc = .port }} {{ end }} {{ if eq .name "ws"}} {{ $servicePortRpcWs = .port }} {{ end }} {{ if eq .name "graphql"}} {{ $servicePortGraphQl = .port }} {{ end }} {{ if eq .name "graphql-ws"}} {{ $servicePortGraphQlWs = .port }} {{ end }} + {{ if eq .name "http-indexer"}} {{ $servicePortHttp = .port }} {{ end }} {{- end }} {{- $pathType := .Values.node.ingress.pathType | default "ImplementationSpecific" -}} @@ -197,5 +201,51 @@ spec: serviceName: {{ $serviceName }} servicePort: {{ $servicePortGraphQlWs }} {{- end }} + +--- +apiVersion: {{ include "taraxa-node.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }}-indexer + labels: + app: taraxa-node + app.kubernetes.io/name: {{ include "taraxa-node.name" . }} + helm.sh/chart: {{ include "taraxa-node.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- with .Values.node.ingress.annotationsIndexer }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if $apiIsStable }} +{{- if .Values.node.ingress.ingressClassName }} + ingressClassName: {{ .Values.node.ingress.ingressClassName }} +{{- end }} +{{- end }} + {{- if .Values.wildCertDomainSecret }} + tls: + - hosts: + - {{ include "taraxa-node.indexerName" . | quote }} + secretName: {{ .Values.wildCertDomainSecret }} + {{- end }} + rules: + - host: {{ include "taraxa-node.indexerName" . | quote }} + http: + paths: + - path: / + {{- if and $pathType $ingressSupportsPathType }} + pathType: {{ $pathType }} + {{- end }} + backend: + {{- if $apiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePortHttp }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePortHttp }} + {{- end }} {{- end }} {{- end }} diff --git a/charts/taraxa-node/templates/taraxa-node-service.yaml b/charts/taraxa-node/templates/taraxa-node-service.yaml index b9a6653e1c..7b3dab0832 100644 --- a/charts/taraxa-node/templates/taraxa-node-service.yaml +++ b/charts/taraxa-node/templates/taraxa-node-service.yaml @@ -52,4 +52,74 @@ spec: protocol: {{ $port.protocol }} {{- end }} {{- end }} + +{{ if .Values.node.loadBalancer.enabled }} +{{- range $key, $value := .Values.node.loadBalancer.addresses }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ $.Release.Name }}-taraxa-node-udp-{{ $key }} + labels: + name: {{ $.Release.Name }}-taraxa-node-udp-{{ $key }} +spec: + type: LoadBalancer + loadBalancerIP: {{ $value | quote }} + externalTrafficPolicy: Local + selector: + statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} + ports: + - name: udp-listen-port + port: 10002 + targetPort: 10002 + protocol: UDP + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ $.Release.Name }}-taraxa-node-tcp-{{ $key }} + labels: + name: {{ $.Release.Name }}-taraxa-node-tcp-{{ $key }} +spec: + type: LoadBalancer + loadBalancerIP: {{ $value | quote }} + externalTrafficPolicy: Local + selector: + statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} + ports: + - name: tcp-listen-port + port: 10002 + targetPort: 10002 + protocol: TCP +{{- end }} +{{- end }} + +{{ if .Values.node.nodePort.enabled }} +{{- range $key, $value := .Values.node.nodePort.ports }} +--- +apiVersion: v1 +kind: Service 
+metadata: + name: {{ $.Release.Name }}-taraxa-node-discovery-{{ $key }} + labels: + name: {{ $.Release.Name }}-taraxa-node-discovery-{{ $key }} +spec: + type: NodePort + selector: + statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} + ports: + - name: udp-listen-port + port: {{ $value }} + targetPort: {{ $value }} + nodePort: {{ $value }} + protocol: UDP + - name: tcp-listen-port + port: {{ $value }} + targetPort: {{ $value }} + nodePort: {{ $value }} + protocol: TCP +{{- end }} +{{- end }} + {{- end }} diff --git a/charts/taraxa-node/templates/taraxa-node-servicemonitor.yaml b/charts/taraxa-node/templates/taraxa-node-servicemonitor.yaml index 0ee519890d..ed7d6aaa2a 100644 --- a/charts/taraxa-node/templates/taraxa-node-servicemonitor.yaml +++ b/charts/taraxa-node/templates/taraxa-node-servicemonitor.yaml @@ -22,5 +22,8 @@ spec: - honorLabels: true path: /metrics port: metrics + - honorLabels: true + path: /metrics + port: metrics-indexer {{- end }} {{- end }} diff --git a/charts/taraxa-node/templates/taraxa-node.yaml b/charts/taraxa-node/templates/taraxa-node-statefulset.yaml similarity index 71% rename from charts/taraxa-node/templates/taraxa-node.yaml rename to charts/taraxa-node/templates/taraxa-node-statefulset.yaml index 49494c0e2f..b462673a3a 100644 --- a/charts/taraxa-node/templates/taraxa-node.yaml +++ b/charts/taraxa-node/templates/taraxa-node-statefulset.yaml @@ -10,6 +10,13 @@ metadata: helm.sh/chart: {{ include "taraxa-node.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: {{ include "taraxa-node.fullname" . }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} + {{ end }} spec: replicas: {{ .Values.node.replicaCount }} # to launch or terminate all Pods in parallel. @@ -30,13 +37,28 @@ spec: partition: a app.kubernetes.io/name: {{ include "taraxa-node.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: {{ include "taraxa-node.fullname" . }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} + {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." 
spec: initContainers: - name: config-adapter + {{- if and .Values.node.image.repository .Values.node.image.tag }} + image: "{{ .Values.node.image.repository }}:{{ .Values.node.image.tag }}" + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + {{- if .Values.node.image.pullPolicy }} + imagePullPolicy: {{ .Values.node.image.pullPolicy }} + {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end }} envFrom: - secretRef: name: {{ .Release.Name }} @@ -90,9 +112,31 @@ spec: readOnly: true subPath: status.py {{- end }} + {{- if .Values.node.indexer.enabled }} + - name: taraxa-indexer + image: "{{ .Values.node.indexer.image.repository }}:{{ .Values.node.indexer.image.tag }}" + imagePullPolicy: {{ .Values.node.indexer.image.pullPolicy }} + command: ["/taraxa-indexer"] + args: + - -data_dir + - {{ .Values.node.indexer.persistence.mountPoint }} + - -blockchain_ws + - 'ws://localhost:8777' + volumeMounts: + - name: indexer-data + mountPath: /data + {{- end }} - name: taraxa-node + {{- if and .Values.node.image.repository .Values.node.image.tag }} + image: "{{ .Values.node.image.repository }}:{{ .Values.node.image.tag }}" + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + {{- if .Values.node.image.pullPolicy }} + imagePullPolicy: {{ .Values.node.image.pullPolicy }} + {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end }} args: {{- toYaml .Values.node.args | nindent 12 }} env: @@ -208,5 +252,26 @@ spec: resources: requests: storage: "{{ .Values.node.persistence.size }}" + {{- if .Values.node.indexer.enabled }} + - metadata: + name: indexer-data + annotations: + {{- if .Values.node.indexer.persistence.annotations}} + {{- toYaml .Values.node.indexer.persistence.annotations | nindent 4 }} + {{- end }} + spec: + accessModes: + - {{ .Values.node.indexer.persistence.accessMode | quote }} + {{- if .Values.node.indexer.persistence.storageClass }} + {{- if (eq "-" .Values.node.indexer.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.node.indexer.persistence.storageClass }}" + {{- end }} + {{- end }} + resources: + requests: + storage: "{{ .Values.node.indexer.persistence.size }}" + {{- end }} {{- end }} {{- end }} diff --git a/charts/taraxa-node/templates/taraxa-nodes-services-loadbalancer.yaml b/charts/taraxa-node/templates/taraxa-nodes-services-loadbalancer.yaml deleted file mode 100644 index 818c98b107..0000000000 --- a/charts/taraxa-node/templates/taraxa-nodes-services-loadbalancer.yaml +++ /dev/null @@ -1,43 +0,0 @@ -{{ if .Values.node.enabled }} -{{ if .Values.node.loadBalancer.enabled }} -{{- range $key, $value := .Values.node.loadBalancer.addresses }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ $.Release.Name }}-taraxa-node-udp-{{ $key }} - labels: - name: {{ $.Release.Name }}-taraxa-node-udp-{{ $key }} -spec: - type: LoadBalancer - loadBalancerIP: {{ $value | quote }} - externalTrafficPolicy: Local - selector: - statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} - ports: - - name: udp-listen-port - port: 10002 - targetPort: 10002 - protocol: UDP - ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ $.Release.Name }}-taraxa-node-tcp-{{ $key }} - labels: - name: {{ $.Release.Name }}-taraxa-node-tcp-{{ $key }} -spec: - type: LoadBalancer - loadBalancerIP: {{ $value | quote }} - externalTrafficPolicy: Local - selector: - statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} 
- ports: - - name: tcp-listen-port - port: 10002 - targetPort: 10002 - protocol: TCP -{{- end }} -{{- end }} -{{- end }} diff --git a/charts/taraxa-node/templates/taraxa-nodes-services-nodeport.yaml b/charts/taraxa-node/templates/taraxa-nodes-services-nodeport.yaml deleted file mode 100644 index 51a5e78fab..0000000000 --- a/charts/taraxa-node/templates/taraxa-nodes-services-nodeport.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{ if .Values.node.enabled }} -{{ if .Values.node.nodePort.enabled }} -{{- range $key, $value := .Values.node.nodePort.ports }} --- -apiVersion: v1 -kind: Service -metadata: - name: {{ $.Release.Name }}-taraxa-node-discovery-{{ $key }} - labels: - name: {{ $.Release.Name }}-taraxa-node-discovery-{{ $key }} -spec: - type: NodePort - selector: - statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} - ports: - - name: udp-listen-port - port: {{ $value }} - targetPort: {{ $value }} - nodePort: {{ $value }} - protocol: UDP - - name: tcp-listen-port - port: {{ $value }} - targetPort: {{ $value }} - nodePort: {{ $value }} - protocol: TCP -{{- end }} -{{- end }} -{{- end }}
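The transaction-generation ConfigMap introduced below bundles a pinned requirements.txt and a transactions.py loop that spams value transfers to the initial validators. The RPC calls the script depends on can be smoke-tested by hand before enabling transactionGeneration; a minimal sketch, assuming web3 5.x (as pinned below) and a locally reachable RPC endpoint (the URL and port are assumptions mirroring the chart's defaults, not part of the chart itself):

```python
# Standalone smoke test for the RPC calls used by transactions.py.
from web3 import Web3

PROVIDER_URL = 'http://127.0.0.1:7777'  # hypothetical endpoint; the chart points at the -head service
PENDING_TRANSACTIONS_THRESHOLD = 1000   # same default the script hardcodes

provider = Web3.HTTPProvider(PROVIDER_URL)
web3 = Web3(provider)

# net_version is how the script discovers the chain id used for signing
chain_id = int(provider.make_request('net_version', [])['result'])
print(f'chain id: {chain_id}')

# the script throttles itself while the pending pool is above the threshold
pending = web3.eth.get_block_transaction_count('pending')
print(f'pending transactions: {pending} (throttling above {PENDING_TRANSACTIONS_THRESHOLD})')
```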
diff --git a/charts/taraxa-node/templates/transaction-generation-script-configmap.yaml b/charts/taraxa-node/templates/transaction-generation-script-configmap.yaml new file mode 100644 index 0000000000..316d45bbc5 --- /dev/null +++ b/charts/taraxa-node/templates/transaction-generation-script-configmap.yaml @@ -0,0 +1,129 @@ +{{- if .Values.transactionGeneration.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-transaction-generation + labels: + app: node + app.kubernetes.io/name: node + helm.sh/chart: {{ include "taraxa-node.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +data: + requirements.txt: |- + aiohttp==3.8.4 + aiosignal==1.3.1 + async-timeout==4.0.2 + attrs==22.2.0 + base58==2.1.1 + bitarray==2.7.3 + certifi==2022.12.7 + charset-normalizer==3.1.0 + coloredlogs==15.0.1 + cytoolz==0.12.1 + eth-abi==2.2.0 + eth-account==0.5.9 + eth-hash==0.5.1 + eth-keyfile==0.5.1 + eth-keys==0.3.4 + eth-rlp==0.2.1 + eth-typing==2.3.0 + eth-utils==1.9.5 + frozenlist==1.3.3 + hexbytes==0.3.0 + humanfriendly==10.0 + idna==3.4 + ipfshttpclient==0.8.0a2 + jsonschema==4.17.3 + lru-dict==1.1.8 + multiaddr==0.0.9 + multidict==6.0.4 + netaddr==0.8.0 + parsimonious==0.8.1 + protobuf==3.19.5 + pycryptodome==3.17 + pyrsistent==0.19.3 + python-dotenv==1.0.0 + requests==2.28.2 + rlp==2.0.1 + six==1.16.0 + toolz==0.12.0 + urllib3==1.26.15 + varint==1.0.2 + web3==5.31.4 + websockets==9.1 + yarl==1.8.2 + transactions.py: |- + import logging + import coloredlogs + import time + from dotenv import load_dotenv + from os import getenv + from web3 import Web3 + + load_dotenv() + + LOG_LEVEL = getenv('LOG_LEVEL', 'INFO') + PROVIDER_URL = getenv('PROVIDER_URL') + PRIVATE_KEY = getenv('PRIVATE_KEY') + PENDING_TRANSACTIONS_THRESHOLD = 1000 + + logger = logging.getLogger() + coloredlogs.install(level=LOG_LEVEL, logger=logger) + + provider = Web3.HTTPProvider(PROVIDER_URL) + chain_id = provider.make_request('net_version', []) + chain_id = int(chain_id['result']) + logger.info(f'Got chain ID: {chain_id}') + + node_config = provider.make_request('taraxa_getConfig', []) + initial_validators = list( + map(lambda x: Web3.toChecksumAddress(x['address']), node_config['result']['dpos']['initial_validators'])) + logger.info(f'Got initial validators: {initial_validators}') + + web3 = Web3(provider) + logger.info(f'Connected to Taraxa node: {PROVIDER_URL}') + + last_block = web3.eth.get_block('latest') + logger.info(f'Last block: #{last_block.number}') + + account = web3.eth.account.from_key(PRIVATE_KEY) + logger.info(f'Account: {account.address}') + + transaction_count = int(web3.eth.get_transaction_count(account.address)) + logger.info(f'Transaction count for address: {transaction_count}') + + while True: + pending_transactions = web3.eth.get_block_transaction_count('pending') + logger.info(f'Number of pending transactions: {pending_transactions}') + if pending_transactions > PENDING_TRANSACTIONS_THRESHOLD: + logger.info( + 'Number of pending transactions is above threshold, sleeping for 10 seconds') + time.sleep(10) + continue + + logger.info('Sending transactions to initial validators') + + for initial_validator in initial_validators: + logger.info( + f'Sending transaction #{transaction_count} to {initial_validator}') + + transaction = { + 'from': account.address, + 'to': initial_validator, + 'value': 1, + 'gas': 21000, + 'gasPrice': 1, + 'nonce': transaction_count, + 'chainId': chain_id, + } + logger.debug(f'Transaction {transaction}') + signed_transaction = account.sign_transaction(transaction) + web3.eth.send_raw_transaction(signed_transaction.rawTransaction) + # get_transaction_count returned the next usable nonce, so only + # advance it after the transaction has actually been submitted + transaction_count = transaction_count + 1 + + time.sleep(1) +{{- end }} diff --git a/charts/taraxa-node/templates/transaction-generation-statefulset.yaml b/charts/taraxa-node/templates/transaction-generation-statefulset.yaml new file mode 100644 index 0000000000..c620d5b3fb --- /dev/null +++ b/charts/taraxa-node/templates/transaction-generation-statefulset.yaml @@ -0,0 +1,73 @@ +{{ if .Values.transactionGeneration.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ .Release.Name }}-transaction-generation + labels: + app: transaction-generation + app.kubernetes.io/name: transaction-generation + helm.sh/chart: {{ include "taraxa-node.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + replicas: 1 + serviceName: {{ .Release.Name }}-transaction-generation + # to launch or terminate all Pods in parallel. + # https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#parallel-pod-management + podManagementPolicy: Parallel + selector: + matchLabels: + app: transaction-generation + app.kubernetes.io/name: {{ .Release.Name }}-transaction-generation + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + name: transaction-generation + labels: + app: transaction-generation + app.kubernetes.io/name: {{ .Release.Name }}-transaction-generation + app.kubernetes.io/instance: {{ .Release.Name }} + annotations: + kubernetes.io/change-cause: "Configuration through configmaps." + spec: + containers: + - name: transaction-generation + image: "python:3.8" + imagePullPolicy: IfNotPresent + env: + - name: PRIVATE_KEY + valueFrom: + secretKeyRef: + name: {{ .Release.Name }} + key: TRANSACTION_GENERATION_PRIVATE_KEY + - name: PROVIDER_URL + value: http://{{ include "taraxa-node.fullname" . }}-head:7777 + command: ["/bin/bash", "-c", "--"] + args: [ "pip install -r /app/requirements.txt && python /app/transactions.py" ] + volumeMounts: + - name: requirements + mountPath: /app/requirements.txt + readOnly: true + subPath: requirements.txt + - name: script + mountPath: /app/transactions.py + readOnly: true + subPath: transactions.py + {{- with .Values.affinity }} + affinity: + {{- toYaml .
| nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: requirements + configMap: + defaultMode: 0700 + name: {{ .Release.Name }}-transaction-generation + - name: script + configMap: + defaultMode: 0700 + name: {{ .Release.Name }}-transaction-generation +{{- end }} diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index f62ff08f9a..aa49074ef8 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -16,7 +16,7 @@ image: pullPolicy: IfNotPresent config: - # integer, 1=Mainnet, 2=Testnet, 3=Devnet) + # integer, 841=Mainnet, 842=Testnet, 843=Devnet # 100 for default helm test network: "100" extraArgs: [] @@ -33,6 +33,11 @@ config: - "badf7196e18f653130564fd2f27419bff36194094057a69053bbe3a83a97b4fe" vrfs: - "c7c32f136cf4529471905a6b775ad82a076a5b5d3160b76ba683c743b8a852cff06560102e3dbab2e8b62082611dfc378c90336d01c0a7fd2a1a7bb88fb63478" + consensusnodeLight: + keys: + - "a48867f0133acd5e10dd980c4ad824da69c6c1947d2fb6c2b576f41cccf5e782" + vrfs: + - "6441cd427dcad51d7a2054d777237e1e53f6cb280eebfed6a6647a5c15fd0808d24dab2ffe1c32b4b608bdadf657f82f1871fa8dc19faeef3833bb3e42bb65ec" bootnode: keys: - "45dc56636faf97230f557e16345055f5839dad25f4b3f6f88a02add24b4a00fc" @@ -50,6 +55,10 @@ slack: channel: channel k8s_cluster: taraxa +transactionGeneration: + enabled: true + privateKey: "" + nameOverride: "" fullnameOverride: "" @@ -65,6 +74,7 @@ affinity: {} node: enabled: true + image: {} replicaCount: 20 loadBalancer: enabled: false @@ -94,6 +104,16 @@ node: annotationsRpcWS: {} annotationsGraphQl: {} annotationsGraphQlWS: {} + annotationsIndexer: + nginx.ingress.kubernetes.io/affinity: "cookie" + nginx.ingress.kubernetes.io/session-cookie-name: "stickounet" + nginx.ingress.kubernetes.io/session-cookie-expires: "172800" + nginx.ingress.kubernetes.io/session-cookie-max-age: "172800" + nginx.ingress.kubernetes.io/affinity-mode: persistent + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/cors-allow-origin: "*" + nginx.ingress.kubernetes.io/cors-allow-methods: "PUT, GET, POST, OPTIONS, DELETE" + nginx.ingress.kubernetes.io/cors-allow-headers: "Authorization,Range,Content-Range,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Access-Control-Allow-Origin" hosts: [] tls: [] ports: @@ -111,6 +131,9 @@ node: - name: udp-listen-port containerPort: 10002 protocol: UDP + - name: http-indexer + containerPort: 8080 + protocol: TCP service: ports: - name: rest @@ -130,6 +153,12 @@ node: - name: metrics port: 8888 protocol: TCP + - name: metrics-indexer + port: 2112 + protocol: TCP + - name: http-indexer + port: 8080 + protocol: TCP serviceMonitor: enabled: false resources: {} @@ -140,10 +169,23 @@ node: size: 30Gi storageClass: annotations: {} - + indexer: + enabled: false + image: + repository: gcr.io/jovial-meridian-249123/taraxa-indexer + tag: latest + pullPolicy: Always + persistence: + enabled: false + accessMode: ReadWriteOnce + size: 30Gi + storageClass: + annotations: {} + mountPoint: /data bootnode: enabled: true + image: {} replicaCount: 1 loadBalancer: enabled: false @@ -187,6 +229,7 @@ bootnode: consensusnode: enabled: true + image: {} replicaCount: 1 probes: enabled: true @@ -236,6 +279,61 @@ consensusnode: storageClass: annotations: {} +consensusnodeLight: + enabled: false + image: {} + replicaCount: 1 + probes: + enabled: true + debug: 0 + args: + - "taraxad" + - "--config" + - 
"/root/.taraxa/conf_taraxa.json" + - "--genesis" + - "/root/.taraxa/genesis_taraxa.json" + - "--wallet" + - "/root/.taraxa/wallet_taraxa.json" + - "--light" + ports: + - name: rest + containerPort: 7777 + - name: ws + containerPort: 8777 + - name: tcp-listen-port + containerPort: 10002 + protocol: TCP + - name: udp-listen-port + containerPort: 10002 + protocol: UDP + service: + ports: + - name: rest + port: 7777 + - name: ws + port: 8777 + - name: tcp-listen-port + port: 10002 + protocol: TCP + - name: udp-listen-port + port: 10002 + protocol: UDP + - name: metrics + port: 8888 + protocol: TCP + serviceMonitor: + enabled: false + resources: {} + nodeSelector: {} + persistence: + enabled: false + accessMode: ReadWriteOnce + size: 30Gi + storageClass: + annotations: {} + + + explorer: enabled: false @@ -260,3 +358,9 @@ test: repository: gcr.io/jovial-meridian-249123/python tag: latest pullPolicy: IfNotPresent + +kubemonkey: + enabled: false + mtbf: 2 + killMode: "fixed" + killValue: '1' \ No newline at end of file diff --git a/conanfile.py b/conanfile.py index ca76325458..364e1d5d2a 100644 --- a/conanfile.py +++ b/conanfile.py @@ -13,15 +13,15 @@ class TaraxaConan(ConanFile): generators = "cmake" def requirements(self): - self.requires("boost/1.80.0") - self.requires("cppcheck/2.7.5") - self.requires("openssl/1.1.1s") + self.requires("boost/1.81.0") + self.requires("cppcheck/2.10") + self.requires("openssl/1.1.1t") self.requires("cryptopp/8.7.0") - self.requires("gtest/1.12.1") + self.requires("gtest/1.13.0") self.requires("lz4/1.9.4") self.requires("rocksdb/6.29.5") self.requires("prometheus-cpp/1.1.0") - self.requires("libjson-rpc-cpp/1.3.0@bincrafters/stable") + self.requires("jsoncpp/1.9.5") def _configure_boost_libs(self): self.options["boost"].without_atomic = False @@ -63,17 +63,12 @@ def configure(self): self.options["cppcheck"].have_rules = False self.options["rocksdb"].use_rtti = True self.options["rocksdb"].with_lz4 = True - self.options["libjson-rpc-cpp"].shared = False # mpir is required by cppcheck and it causing gmp confict self.options["mpir"].enable_gmpcompat = False - # mpir is z3 dependency and it couldn't be built for arm - if (self.settings.arch == "armv8"): - self.options["cppcheck"].with_z3 = False - def _configure_cmake(self): cmake = CMake(self) - # set find path to clang utils dowloaded by that script + # set find path to clang utils downloaded by that script cmake.configure() return cmake diff --git a/doc/RPC.md b/doc/RPC.md index 5e80b6fa49..5713106b93 100644 --- a/doc/RPC.md +++ b/doc/RPC.md @@ -448,9 +448,6 @@ curl -X POST --data '{"jsonrpc":"2.0","method":"taraxa_getConfig","params":[],"i "0x0274cfffea9fa850e54c93a23042f12a87358a82": "0x141e8d17", "0x111f91441efc8c6c0edf6534970cc887e2fabaa8": "0x24048ce3d" }, - "hardforks": { - "fix_genesis_fork_block": "0x102ca0" - } }, "pbft": { "committee_size": "0x3e8", @@ -481,6 +478,39 @@ curl -X POST --data '{"jsonrpc":"2.0","method":"taraxa_getConfig","params":[],"i } ``` +### taraxa_getChainStats + +Returns current chain stats with count of transactions, PBFT blocks and DAG blocks + +#### Parameters + +none + +#### Returns + +`OBJECT` - current chain stats object +* `pbft_period`: `QUANTITY` - current PBFT period +* `dag_blocks_executed`: `QUANTITY` - count of executed(finalized) DAG blocks +* `transactions_executed`: `QUANTITY` - count of executed transactions + +#### Example + +```json +// Request +curl -X POST --data '{"jsonrpc":"2.0","method":"taraxa_getChainStats","params":[],"id":1}' + +// Result +{ + "id": 1, + 
"jsonrpc": "2.0", + "result": { + "pbft_period": 50, + "dag_blocks_executed": 100, + "transactions_executed": 200 + } +} +``` + ## Test API ### get_sortition_change diff --git a/doc/building.md b/doc/building.md index b6434a9927..f016230256 100644 --- a/doc/building.md +++ b/doc/building.md @@ -29,7 +29,8 @@ will build out of the box without further effort: libsnappy-dev \ rapidjson-dev \ libgmp-dev \ - libmpfr-dev + libmpfr-dev \ + libmicrohttpd-dev # Optional. Needed to run py_test. This won't install on arm64 OS because package is missing in apt sudo add-apt-repository ppa:ethereum/ethereum @@ -40,7 +41,7 @@ will build out of the box without further effort: sudo python3 -m pip install conan==1.59.0 # Setup clang as default compiler either in your IDE or by env. variables" - export C="clang-14" + export CC="clang-14" export CXX="clang++-14" ### Clone the Repository @@ -90,7 +91,8 @@ will build out of the box without further effort: python3-pip \ rapidjson-dev \ libgmp-dev \ - libmpfr-dev + libmpfr-dev \ + libmicrohttpd-dev # Install conan package manager @@ -127,7 +129,7 @@ will build out of the box without further effort: rm -f llvm.sh # Setup clang as default compiler either in your IDE or by env. variables" - export C="clang-14" + export CC="clang-14" export CXX="clang++-14" ### Clone the Repository @@ -169,10 +171,10 @@ And optional: ### Install taraxa-node dependencies: -First you need to get (Brew)[https://brew.sh/] package manager. After that you need tot install dependencies with it. Currently there is no llvm-14 in brew, but it works well with llvm-13 +First you need to get (Brew)[https://brew.sh/] package manager. After that you need tot install dependencies with it. Clang-14 is used for compilation. brew update - brew install coreutils go autoconf automake gflags git libtool llvm@13 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr + brew install coreutils go autoconf automake gflags git libtool llvm@14 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr libmicrohttpd ### Clone the Repository @@ -186,7 +188,8 @@ First you need to get (Brew)[https://brew.sh/] package manager. After that you n # It is recommended to use clang because on other compilers you could face some errors conan profile new clang --detect && \ conan profile update settings.compiler=clang clang && \ - conan profile update settings.compiler.version=13 clang && \ + conan profile update settings.compiler.version=14 clang && \ + conan profile update settings.compiler.compiler.cppstd=14 conan profile update settings.compiler.libcxx=libc++ clang && \ conan profile update env.CC=clang clang && \ conan profile update env.CXX=clang++ clang @@ -224,28 +227,6 @@ It could be cleaned up with: rm -rf ~/.conan/data ``` -#### Project building issue - -If you are facing strange errors with project compilation it could be a problem that after install of llvm clang if pointing to a default apple clang. You could check that with `clang --version`. It should not point to `/Library/Developer/CommandLineTools/usr/bin`, but something like `/usr/local/opt/llvm/bin`. So you should specify full paths to a compiler: -1. Check full path with `brew info llvm`. Search for command that looks like -``` - echo 'export PATH="/usr/local/opt/llvm/bin:$PATH"' >> ~/.zshrc -``` -2. Take bin path from it. In our case this is `/usr/local/opt/llvm/bin` It shouldn't differ for most cases. -3. 
diff --git a/doc/building.md b/doc/building.md index b6434a9927..f016230256 100644 --- a/doc/building.md +++ b/doc/building.md @@ -29,7 +29,8 @@ will build out of the box without further effort: libsnappy-dev \ rapidjson-dev \ libgmp-dev \ - libmpfr-dev + libmpfr-dev \ + libmicrohttpd-dev # Optional. Needed to run py_test. This won't install on arm64 OS because package is missing in apt sudo add-apt-repository ppa:ethereum/ethereum @@ -40,7 +41,7 @@ will build out of the box without further effort: sudo python3 -m pip install conan==1.59.0 # Setup clang as default compiler either in your IDE or by env. variables" - export C="clang-14" + export CC="clang-14" export CXX="clang++-14" ### Clone the Repository @@ -90,7 +91,8 @@ will build out of the box without further effort: python3-pip \ rapidjson-dev \ libgmp-dev \ - libmpfr-dev + libmpfr-dev \ + libmicrohttpd-dev # Install conan package manager @@ -127,7 +129,7 @@ will build out of the box without further effort: rm -f llvm.sh # Setup clang as default compiler either in your IDE or by env. variables" - export C="clang-14" + export CC="clang-14" export CXX="clang++-14" ### Clone the Repository @@ -169,10 +171,10 @@ And optional: ### Install taraxa-node dependencies: -First you need to get (Brew)[https://brew.sh/] package manager. After that you need tot install dependencies with it. Currently there is no llvm-14 in brew, but it works well with llvm-13 +First you need to get [Brew](https://brew.sh/) package manager. After that you need to install dependencies with it. Clang-14 is used for compilation. brew update - brew install coreutils go autoconf automake gflags git libtool llvm@13 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr + brew install coreutils go autoconf automake gflags git libtool llvm@14 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr libmicrohttpd ### Clone the Repository @@ -186,7 +188,8 @@ First you need to get (Brew)[https://brew.sh/] package manager. After that you n # It is recommended to use clang because on other compilers you could face some errors conan profile new clang --detect && \ conan profile update settings.compiler=clang clang && \ - conan profile update settings.compiler.version=13 clang && \ + conan profile update settings.compiler.version=14 clang && \ + conan profile update settings.compiler.cppstd=14 clang && \ conan profile update settings.compiler.libcxx=libc++ clang && \ conan profile update env.CC=clang clang && \ conan profile update env.CXX=clang++ clang @@ -224,28 +227,6 @@ It could be cleaned up with: ``` rm -rf ~/.conan/data ``` -#### Project building issue - -If you are facing strange errors with project compilation it could be a problem that after install of llvm clang if pointing to a default apple clang. You could check that with `clang --version`. It should not point to `/Library/Developer/CommandLineTools/usr/bin`, but something like `/usr/local/opt/llvm/bin`. So you should specify full paths to a compiler: -1. Check full path with `brew info llvm`. Search for command that looks like -``` - echo 'export PATH="/usr/local/opt/llvm/bin:$PATH"' >> ~/.zshrc -``` -2. Take bin path from it. In our case this is `/usr/local/opt/llvm/bin` It shouldn't differ for most cases. -3. Append compiler to it and specify it in conan profile: -``` - conan profile update env.CC=/usr/local/opt/llvm/bin/clang clang && \ - conan profile update env.CXX=/usr/local/opt/llvm/bin/clang++ clang -``` -4. Specify compiler with full path to cmake: -``` -cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=/usr/local/opt/llvm/bin/clang -DCMAKE_CXX_COMPILER=/usr/local/opt/llvm/bin/clang++ ../ -``` -5. After successfull finish of that command processing compile project with: -``` -make -j$(nproc) -``` - ## Building on M1 Macs for x86_64 with Rosetta2 You should be able to build project following default MacOS building process. But here is a guide how to build project for x86_64 arch with Rosetta2. @@ -264,7 +245,7 @@ You should be able to build project following default MacOS building process. Bu ### Install dependencies - /usr/local/bin/brew install coreutils go autoconf automake gflags git libtool llvm@13 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr + /usr/local/bin/brew install coreutils go autoconf automake gflags git libtool llvm@13 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr libmicrohttpd ### Clone the Repository diff --git a/libraries/aleth/libdevcore/CommonData.h b/libraries/aleth/libdevcore/CommonData.h index 4b7177a828..56733d8601 100644 --- a/libraries/aleth/libdevcore/CommonData.h +++ b/libraries/aleth/libdevcore/CommonData.h @@ -73,19 +73,19 @@ static bool isHash(std::string const& _hash) { /// Converts byte array to a string containing the same (binary) data. Unless /// the byte array happens to contain ASCII data, this won't be printable. inline std::string asString(bytes const& _b) { - return std::string((char const*)_b.data(), (char const*)(_b.data() + _b.size())); + return std::string(reinterpret_cast<char const*>(_b.data()), reinterpret_cast<char const*>(_b.data() + _b.size())); } /// Converts byte array ref to a string containing the same (binary) data. /// Unless the byte array happens to contain ASCII data, this won't be /// printable. inline std::string asString(bytesConstRef _b) { - return std::string((char const*)_b.data(), (char const*)(_b.data() + _b.size())); + return std::string(reinterpret_cast<char const*>(_b.data()), reinterpret_cast<char const*>(_b.data() + _b.size())); } /// Converts a string to a byte array containing the string's (byte) data. inline bytes asBytes(std::string const& _b) { - return bytes((::byte const*)_b.data(), (::byte const*)(_b.data() + _b.size())); + return bytes(reinterpret_cast<::byte const*>(_b.data()), reinterpret_cast<::byte const*>(_b.data() + _b.size())); } /// Converts a string into the big-endian base-16 stream of integers (NOT diff --git a/libraries/aleth/libdevcore/FixedHash.h b/libraries/aleth/libdevcore/FixedHash.h index 645142ef3e..63c60f3f55 100644 --- a/libraries/aleth/libdevcore/FixedHash.h +++ b/libraries/aleth/libdevcore/FixedHash.h @@ -209,7 +209,8 @@ class FixedHash { /// Populate with random data. template <class Engine> void randomize(Engine& _eng) { - for (auto& i : m_data) i = (uint8_t)std::uniform_int_distribution(0, 255)(_eng); + std::generate(m_data.begin(), m_data.end(), + [&]() { return (uint8_t)std::uniform_int_distribution(0, 255)(_eng); }); } /// @returns a random valued object. @@ -404,8 +405,8 @@ class SecureFixedHash : private FixedHash<T> { /// Fast equality operator for h256.
template <> inline bool FixedHash<32>::operator==(FixedHash<32> const& _other) const { - const uint64_t* hash1 = (const uint64_t*)data(); - const uint64_t* hash2 = (const uint64_t*)_other.data(); + const uint64_t* hash1 = reinterpret_cast<const uint64_t*>(data()); + const uint64_t* hash2 = reinterpret_cast<const uint64_t*>(_other.data()); return (hash1[0] == hash2[0]) && (hash1[1] == hash2[1]) && (hash1[2] == hash2[2]) && (hash1[3] == hash2[3]); } diff --git a/libraries/aleth/libdevcore/RLP.cpp b/libraries/aleth/libdevcore/RLP.cpp index e85aff99a2..a8a34966cb 100644 --- a/libraries/aleth/libdevcore/RLP.cpp +++ b/libraries/aleth/libdevcore/RLP.cpp @@ -115,27 +115,27 @@ size_t RLP::length() const { if (m_data.size() <= size_t(n - c_rlpDataIndLenZero)) BOOST_THROW_EXCEPTION(BadRLP()); if (m_data.size() > 1) if (m_data[1] == 0) BOOST_THROW_EXCEPTION(BadRLP()); - unsigned lengthSize = n - c_rlpDataIndLenZero; - if (lengthSize > sizeof(ret)) + const unsigned length_size = n - c_rlpDataIndLenZero; + if (length_size > sizeof(ret)) // We did not check, but would most probably not fit in our memory. BOOST_THROW_EXCEPTION(UndersizeRLP()); // No leading zeroes. if (!m_data[1]) BOOST_THROW_EXCEPTION(BadRLP()); - for (unsigned i = 0; i < lengthSize; ++i) ret = (ret << 8) | m_data[i + 1]; + for (unsigned i = 0; i < length_size; ++i) ret = (ret << 8) | m_data[i + 1]; // Must be greater than the limit. if (ret < c_rlpListStart - c_rlpDataImmLenStart - c_rlpMaxLengthBytes) BOOST_THROW_EXCEPTION(BadRLP()); } else if (n <= c_rlpListIndLenZero) return n - c_rlpListStart; else { - unsigned lengthSize = n - c_rlpListIndLenZero; - if (m_data.size() <= lengthSize) BOOST_THROW_EXCEPTION(BadRLP()); + const unsigned length_size = n - c_rlpListIndLenZero; + if (m_data.size() <= length_size) BOOST_THROW_EXCEPTION(BadRLP()); if (m_data.size() > 1) if (m_data[1] == 0) BOOST_THROW_EXCEPTION(BadRLP()); - if (lengthSize > sizeof(ret)) + if (length_size > sizeof(ret)) // We did not check, but would most probably not fit in our memory. BOOST_THROW_EXCEPTION(UndersizeRLP()); if (!m_data[1]) BOOST_THROW_EXCEPTION(BadRLP()); - for (unsigned i = 0; i < lengthSize; ++i) ret = (ret << 8) | m_data[i + 1]; + for (unsigned i = 0; i < length_size; ++i) ret = (ret << 8) | m_data[i + 1]; if (ret < 0x100 - c_rlpListStart - c_rlpMaxLengthBytes) BOOST_THROW_EXCEPTION(BadRLP()); } // We have to be able to add payloadOffset to length without overflow.
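The length() hunk above only renames lengthSize to length_size; the decoding itself is unchanged: for a long string or list, the bytes after the tag byte form a big-endian integer accumulated one byte at a time. A short sketch of that accumulation, as a plain illustration of the RLP rule rather than the C++ code:

```python
# Decode an RLP "long length": `count` bytes after the tag byte,
# interpreted big-endian, mirroring `ret = (ret << 8) | m_data[i + 1]`.
def decode_long_length(data: bytes, count: int) -> int:
    if count > 8:  # corresponds to the sizeof(ret) UndersizeRLP guard
        raise ValueError('length field too large')
    if data[1] == 0:  # leading zero bytes are rejected as BadRLP
        raise ValueError('leading zero in length')
    ret = 0
    for i in range(count):
        ret = (ret << 8) | data[i + 1]
    return ret

# tag 0xb9 = 0xb7 + 2 length bytes; 0x04 0x00 introduces a 1024-byte string
assert decode_long_length(bytes([0xb9, 0x04, 0x00]), 2) == 1024
```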
diff --git a/libraries/aleth/libdevcore/RLP.h b/libraries/aleth/libdevcore/RLP.h index 98fef745c2..15f897f344 100644 --- a/libraries/aleth/libdevcore/RLP.h +++ b/libraries/aleth/libdevcore/RLP.h @@ -84,7 +84,7 @@ class RLP { /// Construct a node to read RLP data in the string. explicit RLP(std::string const& _s, Strictness _st = VeryStrict) - : RLP(bytesConstRef((::byte const*)_s.data(), _s.size()), _st) {} + : RLP(bytesConstRef(reinterpret_cast<::byte const*>(_s.data()), _s.size()), _st) {} /// The bare data of the RLP. bytesConstRef data() const { return m_data; } @@ -251,7 +251,8 @@ class RLP { std::vector<T> ret; if (isList()) { ret.reserve(itemCount()); - for (auto const i : *this) ret.push_back(i.convert<T>(_flags)); + std::transform((*this).begin(), (*this).end(), std::back_inserter(ret), + [_flags](const auto i) { return i.template convert<T>(_flags); }); } else if (_flags & ThrowOnFail) BOOST_THROW_EXCEPTION(BadCast()); return ret; diff --git a/libraries/aleth/libdevcrypto/Common.h b/libraries/aleth/libdevcrypto/Common.h index a3fe92e29c..83c23b1324 100644 --- a/libraries/aleth/libdevcrypto/Common.h +++ b/libraries/aleth/libdevcrypto/Common.h @@ -34,7 +34,7 @@ struct SignatureStruct { SignatureStruct() = default; SignatureStruct(Signature const& _s) { *(h520*)this = _s; } SignatureStruct(h256 const& _r, h256 const& _s, byte _v) : r(_r), s(_s), v(_v) {} - operator Signature() const { return *(h520 const*)this; } + operator Signature() const { return *reinterpret_cast<h520 const*>(this); } /// @returns true if r,s,v values are valid, otherwise false bool isValid() const noexcept; diff --git a/libraries/aleth/libdevcrypto/CryptoPP.cpp b/libraries/aleth/libdevcrypto/CryptoPP.cpp index da5b5ff5f3..42ebf71d89 100644 --- a/libraries/aleth/libdevcrypto/CryptoPP.cpp +++ b/libraries/aleth/libdevcrypto/CryptoPP.cpp @@ -119,7 +119,7 @@ bool Secp256k1PP::decryptECIES(Secret const& _k, bytesConstRef _sharedMacData, b return false; Secret z; - if (!ecdh::agree(_k, *(Public*)(io_text.data() + 1), z)) return false; // Invalid pubkey or seckey. + if (!ecdh::agree(_k, *reinterpret_cast<Public*>(io_text.data() + 1), z)) return false; // Invalid pubkey or seckey. auto key = ecies::kdf(z, bytes(), 64); bytesConstRef eKey = bytesConstRef(&key).cropped(0, 16); bytesRef mKeyMaterial = bytesRef(&key).cropped(16, 16); diff --git a/libraries/aleth/libp2p/Host.cpp b/libraries/aleth/libp2p/Host.cpp index 4d626d95ec..1e773acf73 100644 --- a/libraries/aleth/libp2p/Host.cpp +++ b/libraries/aleth/libp2p/Host.cpp @@ -55,7 +55,7 @@ Host::Host(std::string _clientVersion, KeyPair const& kp, NetworkConfig _n, Tara handshake_ctx.port = m_listenPort; handshake_ctx.client_version = m_clientVersion; handshake_ctx.on_success = [this](auto const& id, auto const& rlp, auto frame_coder, auto socket) { - ba::post(strand_, [=, this, _ = shared_from_this(), rlp = rlp.data().cropped(0, rlp.actualSize()).toBytes(), + ba::post(strand_, [=, this, rlp = rlp.data().cropped(0, rlp.actualSize()).toBytes(), frame_coder = std::move(frame_coder)]() mutable { startPeerSession(id, RLP(rlp), std::move(frame_coder), socket); }); @@ -94,11 +94,15 @@ Host::Host(std::string _clientVersion, KeyPair const& kp, NetworkConfig _n, Tara } } LOG(m_logger) << "devp2p started. Node id: " << id(); - runAcceptor(); //!!! this needs to be post to session_ioc_ as main_loop_body handles peer/session related stuff // and it should not be execute for bootnodes, but it needs to bind with strand_ // as it touching same structures as discovery part !!!
- ba::post(session_ioc_, [this] { ba::post(strand_, [this] { main_loop_body(); }); }); + ba::post(session_ioc_, [this] { + ba::post(strand_, [this] { + runAcceptor(); + main_loop_body(); + }); + }); } std::shared_ptr<Host> Host::make(std::string _clientVersion, CapabilitiesFactory const& cap_factory, KeyPair const& kp, @@ -116,10 +120,6 @@ } Host::~Host() { - // reset io_context (allows manually polling network, below) - ioc_.stop(); - session_ioc_.restart(); - // shutdown acceptor from same executor ba::post(m_tcp4Acceptor.get_executor(), [this] { m_tcp4Acceptor.cancel(); @@ -136,9 +136,13 @@ Host::~Host() { s->disconnect(ClientQuit); } } - while (0 < session_ioc_.poll()) + // We need to poll both as strand_ is ioc_ + while (0 < session_ioc_.poll() + ioc_.poll()) ; save_state(); + + ioc_.restart(); + session_ioc_.restart(); } ba::io_context::count_type Host::do_work() { @@ -423,7 +427,7 @@ void Host::runAcceptor() { } else { // incoming connection; we don't yet know nodeid auto handshake = make_shared<RLPXHandshake>(handshake_ctx_, socket); - ba::post(strand_, [=, this, this_shared = shared_from_this()] { + ba::post(strand_, [=, this] { m_connecting.push_back(handshake); handshake->start(); }); @@ -462,24 +466,24 @@ void Host::connect(shared_ptr<Peer> const& _p) { bi::tcp::endpoint ep(_p->get_endpoint()); cnetdetails << "Attempting connection to " << _p->id << "@" << ep << " from " << id(); auto socket = make_shared<RLPXSocket>(bi::tcp::socket(make_strand(session_ioc_))); - socket->ref().async_connect( - ep, ba::bind_executor(strand_, [=, this, this_shared = shared_from_this()](boost::system::error_code const& ec) { - _p->m_lastAttempted = chrono::system_clock::now(); - _p->m_failedAttempts++; - - if (ec) { - cnetdetails << "Connection refused to node " << _p->id << "@" << ep << " (" << ec.message() << ")"; - // Manually set error (session not present) - _p->m_lastDisconnect = TCPError; - } else { - cnetdetails << "Starting RLPX handshake with " << _p->id << "@" << ep; - auto handshake = make_shared<RLPXHandshake>(handshake_ctx_, socket, _p->id); - m_connecting.push_back(handshake); - - handshake->start(); - } - m_pendingPeerConns.erase(nptr); - })); + socket->ref().async_connect(ep, ba::bind_executor(strand_, [=, this](boost::system::error_code const& ec) { + _p->m_lastAttempted = chrono::system_clock::now(); + _p->m_failedAttempts++; + + if (ec) { + cnetdetails << "Connection refused to node " << _p->id << "@" << ep << " (" + << ec.message() << ")"; + // Manually set error (session not present) + _p->m_lastDisconnect = TCPError; + } else { + cnetdetails << "Starting RLPX handshake with " << _p->id << "@" << ep; + auto handshake = make_shared<RLPXHandshake>(handshake_ctx_, socket, _p->id); + m_connecting.push_back(handshake); + + handshake->start(); + } + m_pendingPeerConns.erase(nptr); + })); } PeerSessionInfos Host::peerSessionInfos() const { diff --git a/libraries/aleth/libp2p/Host.h b/libraries/aleth/libp2p/Host.h index fde4ea5d60..b24b70c9dd 100644 --- a/libraries/aleth/libp2p/Host.h +++ b/libraries/aleth/libp2p/Host.h @@ -145,9 +145,9 @@ struct Host final : std::enable_shared_from_this<Host> { return "enode://" + id().hex() + "@" + address + ":" + port; } + bool nodeTableHasNode(Public const& _id) const; // private but can be made public if needed private: - bool nodeTableHasNode(Public const& _id) const; Node nodeFromNodeTable(Public const& _id) const; struct KnownNode { diff --git a/libraries/aleth/libp2p/UPnP.cpp b/libraries/aleth/libp2p/UPnP.cpp index 0451c91970..bcda5f2c0d
100644 --- a/libraries/aleth/libp2p/UPnP.cpp +++ b/libraries/aleth/libp2p/UPnP.cpp @@ -56,7 +56,7 @@ UPnP::UPnP() #elif MINIUPNPC_API_VERSION >= 9 descXML = (char*)miniwget(dev->descURL, &descXMLsize, 0); #else - descXML = (char*)miniwget(dev->descURL, &descXMLsize); + descXML = static_cast<char*>(miniwget(dev->descURL, &descXMLsize)); #endif if (descXML) { parserootdesc(descXML, descXMLsize, m_data.get()); diff --git a/libraries/cli/include/cli/config.hpp b/libraries/cli/include/cli/config.hpp index 9fcabe410c..99ed1cc28c 100644 --- a/libraries/cli/include/cli/config.hpp +++ b/libraries/cli/include/cli/config.hpp @@ -38,11 +38,11 @@ class Config { static constexpr const char* REBUILD_DB = "rebuild-db"; static constexpr const char* REBUILD_DB_PERIOD = "rebuild-db-period"; static constexpr const char* REVERT_TO_PERIOD = "revert-to-period"; - static constexpr const char* REBUILD_DB_COLUMNS = "rebuild-db-columns"; static constexpr const char* LIGHT = "light"; static constexpr const char* HELP = "help"; static constexpr const char* VERSION = "version"; static constexpr const char* WALLET = "wallet"; + static constexpr const char* PRUNE_STATE_DB = "prune-state-db"; static constexpr const char* NODE_COMMAND = "node"; static constexpr const char* ACCOUNT_COMMAND = "account"; diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json index 781aa879e9..ff7bf49bb2 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json @@ -13,8 +13,8 @@ "delegation_locking_period": "0x5", "eligibility_balance_threshold": "0xd3c21bcecceda1000000", "vote_eligibility_balance_step": "0x152d02c7e14af6800000", - "validator_maximum_stake": "0x84595161401484A000000", - "minimum_deposit": "0x0", + "validator_maximum_stake": "0x84595161401484a000000", + "minimum_deposit": "0x3635c9adc5dea00000", "max_block_author_reward": "0x5", "dag_proposers_reward": "0x32", "commission_change_delta": "0x0", @@ -28,7 +28,7 @@ "endpoint": "", "description": "Taraxa devnet validator 1", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "05fe580fd2d461ee5f762a33bbe669403bb04a851f2e9ed8d2579a9c9b77c3ec" }, @@ -39,7 +39,7 @@ "endpoint": "", "description": "Taraxa devnet validator 2", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "70d34c86787e5f7bd0f266cad291cb521e23176fa37c6efc034858a1620ac69e" }, @@ -50,7 +50,7 @@ "endpoint": "", "description": "Taraxa devnet validator 3", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "f8d5c00ce9fa3058341e051b36a1e6ccf69df81fb865568b2bf1507d085691e2" }, @@ -61,7 +61,7 @@ "endpoint": "", "description": "Taraxa devnet validator 4", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "aa12507d00c992b95e65d80b21fd2db5b48c4f7ff4393064828d1adc930710b4" }, @@ -72,7 +72,7 @@ "endpoint": "", "description": "Taraxa devnet validator 5", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" +
"0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "bd34898ae0080187c408b5724f05682855c4425fda61d332f5f9d746d4eb753a" }, @@ -83,7 +83,7 @@ "endpoint": "", "description": "Taraxa devnet validator 6", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "25d35fed93989c40b4e8685d9d7ee02213230221ea9efcbe8cfccfc788670dba" }, @@ -94,7 +94,7 @@ "endpoint": "", "description": "Taraxa devnet validator 7", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "55c0bd1af84fb793a5dd7b960e330248d8a0acde566922b3e210f43592700dad" }, @@ -105,7 +105,7 @@ "endpoint": "", "description": "Taraxa devnet validator 8", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "33131367e7279ee51c0f26c6f9b6627848f822d134abef21a88be467dfbaae7b" }, @@ -116,7 +116,7 @@ "endpoint": "", "description": "Taraxa devnet validator 9", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "da63de37c69a59cb3ebbcfb79ef8d561b18b448b544a14438c62cd56bc0a29f5" }, @@ -127,7 +127,7 @@ "endpoint": "", "description": "Taraxa devnet validator 10", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "337178752602a5ca38928bf0d8d434ec653505c92b280b0edab6c39d5e79f4fd" }, @@ -138,7 +138,7 @@ "endpoint": "", "description": "Taraxa devnet validator 11", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "ac08e4ca5f1bcdd61dbefa7551ab839bdd4545e59ee8a4ab5d3aabb71104ab73" }, @@ -149,7 +149,7 @@ "endpoint": "", "description": "Taraxa devnet validator 12", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "189b05cca0a816a36f977f0541ef7585218b2087f04b23444ab58d0c755adecc" }, @@ -160,7 +160,7 @@ "endpoint": "", "description": "Taraxa devnet validator 13", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "8e95172f90b68ee753132bf6342ee00b398e2417312f610d58c34729ab0608ee" }, @@ -171,7 +171,7 @@ "endpoint": "", "description": "Taraxa devnet validator 14", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "959551740ff948d9714f15a2bfb2183c4ead897dd79775a0a18488aa8936e2ba" }, @@ -182,7 +182,7 @@ "endpoint": "", "description": "Taraxa devnet validator 15", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "56b7831cb3e35c1d6d1e3f661de2068d6feeaa54074b3e02709a87d7f0d6c72a" }, @@ -193,7 +193,7 @@ "endpoint": "", "description": "Taraxa devnet validator 16", "delegations": { - 
"0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "e774c519814cbc04008aa958932e7adb82ebbbd6ca69089c0a1458ea34fb4299" }, @@ -204,7 +204,7 @@ "endpoint": "", "description": "Taraxa devnet validator 17", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "1b15b7bef6a1dbe9aeb2792f2e38d6222d31f8c6c15cff1152f258013d70d933" }, @@ -215,7 +215,7 @@ "endpoint": "", "description": "Taraxa devnet validator 18", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "37bf145ac98e7de7db6e5b933e72737fbf190fd4fb1d193b15cf8b00db30ba30" } @@ -226,6 +226,10 @@ "7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x1027e72f1f12813088000000", "ee1326fbf7d9322e5ea02c6fe5eb63535fceccd1": "0x52b7d2dcc80cd2e4000000" }, + "hardforks": { + "rewards_distribution_frequency": { + } + }, "gas_price": { "blocks": 200, "percentile": 60, diff --git a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json index 5c9777aae6..a488289bce 100644 --- a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json @@ -243,6 +243,10 @@ } ] }, + "hardforks": { + "rewards_distribution_frequency": { + } + }, "initial_balances": { "723304d1357a2334fcf902aa3d232f5139080a1b": "0xd53323b7ca3737afbb45000", "b0800c7af0a6aec0ff8dbe01708bd8e300c6305b": "0x208b1d135e4a8000", diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index f07f92f352..7a7f30890f 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -1698,6 +1698,10 @@ "a903715b57d3bf62e098a6a643c6924d9bdacec4": "0x170a0f5040e50400000", "5bd47fef8e8dcb6677c2957ecd78b8232354f145": "0x191cf61eb2bec223400" }, + "hardforks": { + "rewards_distribution_frequency": { + } + }, "gas_price": { "blocks": 200, "percentile": 60, diff --git a/libraries/cli/src/config.cpp b/libraries/cli/src/config.cpp index 65f9b02e75..2a4c8fdf20 100644 --- a/libraries/cli/src/config.cpp +++ b/libraries/cli/src/config.cpp @@ -38,7 +38,8 @@ Config::Config(int argc, const char* argv[]) { bool destroy_db = false; bool rebuild_network = false; bool rebuild_db = false; - bool rebuild_db_columns = false; + bool prune_state_db = false; + bool light_node = false; bool version = false; uint64_t rebuild_db_period = 0; @@ -81,8 +82,6 @@ Config::Config(int argc, const char* argv[]) { "rebuilding all the other " "database tables - this could take a long " "time"); - node_command_options.add_options()(REBUILD_DB_COLUMNS, bpo::bool_switch(&rebuild_db_columns), - "Removes old DB columns "); node_command_options.add_options()(REBUILD_DB_PERIOD, bpo::value(&rebuild_db_period), "Use with rebuild-db - Rebuild db up " "to a specified period"); @@ -132,6 +131,7 @@ Config::Config(int argc, const char* argv[]) { "Enables Test JsonRPC. Disabled by default"); node_command_options.add_options()(ENABLE_DEBUG, bpo::bool_switch(&enable_debug), "Enables Debug RPC interface. 
Disabled by default"); + node_command_options.add_options()(PRUNE_STATE_DB, bpo::bool_switch(&prune_state_db), "Prune state_db"); allowed_options.add(main_options); @@ -214,11 +214,6 @@ Config::Config(int argc, const char* argv[]) { auto default_genesis_json = tools::getGenesis((Config::ChainIdType)chain_id); // override hardforks data with one from default json addNewHardforks(genesis_json, default_genesis_json); - // add vote_eligibility_balance_step field if it is missing in the config - if (genesis_json["dpos"]["vote_eligibility_balance_step"].isNull()) { - genesis_json["dpos"]["vote_eligibility_balance_step"] = - default_genesis_json["dpos"]["vote_eligibility_balance_step"]; - } write_config_and_wallet_files(); } // Override config values with values from CLI @@ -269,7 +264,7 @@ Config::Config(int argc, const char* argv[]) { } node_config_.db_config.db_revert_to_period = revert_to_period; node_config_.db_config.rebuild_db = rebuild_db; - node_config_.db_config.rebuild_db_columns = rebuild_db_columns; + node_config_.db_config.prune_state_db = prune_state_db; node_config_.db_config.rebuild_db_period = rebuild_db_period; node_config_.enable_test_rpc = enable_test_rpc; diff --git a/libraries/common/include/common/encoding_rlp.hpp b/libraries/common/include/common/encoding_rlp.hpp index e973d9decd..41a4c2c917 100644 --- a/libraries/common/include/common/encoding_rlp.hpp +++ b/libraries/common/include/common/encoding_rlp.hpp @@ -9,7 +9,7 @@ #include "common/range_view.hpp" #include "common/util.hpp" -namespace taraxa::util::encoding_rlp { +namespace taraxa::util { using dev::RLP; using RLPEncoderRef = dev::RLPStream&; @@ -153,13 +153,12 @@ void __dec_rlp_tuple_body__(RLP::iterator& i, RLP::iterator const& end, RLP::Str } } -struct InvalidEncodingSize : std::invalid_argument { - uint expected, actual; +struct InvalidEncodingSize : dev::RLPException { + dev::bigint expected, actual; - InvalidEncodingSize(uint expected, uint actual) - : invalid_argument(fmt("Invalid rlp list size; expected: %s, actual: %s", expected, actual)), - expected(expected), - actual(actual) {} + InvalidEncodingSize(uint e, uint a) : expected(e), actual(a) { + RLPException() << dev::errinfo_comment("Invalid rlp list size") << dev::RequirementError(expected, actual); + } }; template @@ -196,34 +195,16 @@ bytes rlp_enc(T const& obj) { return std::move(s.invalidate()); } -} // namespace taraxa::util::encoding_rlp - -#define HAS_RLP_FIELDS \ - void rlp(::taraxa::util::encoding_rlp::RLPDecoderRef encoding); \ - void rlp(::taraxa::util::encoding_rlp::RLPEncoderRef encoding) const; +} // namespace taraxa::util -#define RLP_FIELDS_DEFINE(_class_, ...) \ - void _class_::rlp(::taraxa::util::encoding_rlp::RLPDecoderRef encoding) { \ - ::taraxa::util::encoding_rlp::rlp_tuple(encoding, __VA_ARGS__); \ - } \ - void _class_::rlp(::taraxa::util::encoding_rlp::RLPEncoderRef encoding) const { \ - ::taraxa::util::encoding_rlp::rlp_tuple(encoding, __VA_ARGS__); \ - } +#define HAS_RLP_FIELDS \ + void rlp(::taraxa::util::RLPDecoderRef encoding); \ + void rlp(::taraxa::util::RLPEncoderRef encoding) const; -#define RLP_FIELDS_DEFINE_INPLACE(...) \ - void rlp(::taraxa::util::encoding_rlp::RLPDecoderRef encoding) { \ - ::taraxa::util::encoding_rlp::rlp_tuple(encoding, __VA_ARGS__); \ - } \ - void rlp(::taraxa::util::encoding_rlp::RLPEncoderRef encoding) const { \ - ::taraxa::util::encoding_rlp::rlp_tuple(encoding, __VA_ARGS__); \ - } +#define RLP_FIELDS_DEFINE(_class_, ...) 
\ + void _class_::rlp(::taraxa::util::RLPDecoderRef encoding) { ::taraxa::util::rlp_tuple(encoding, __VA_ARGS__); } \ + void _class_::rlp(::taraxa::util::RLPEncoderRef encoding) const { ::taraxa::util::rlp_tuple(encoding, __VA_ARGS__); } -namespace taraxa::util { -using encoding_rlp::InvalidEncodingSize; -using encoding_rlp::rlp; -using encoding_rlp::rlp_dec; -using encoding_rlp::rlp_enc; -using encoding_rlp::rlp_tuple; -using encoding_rlp::RLPDecoderRef; -using encoding_rlp::RLPEncoderRef; -} // namespace taraxa::util +#define RLP_FIELDS_DEFINE_INPLACE(...) \ + void rlp(::taraxa::util::RLPDecoderRef encoding) { ::taraxa::util::rlp_tuple(encoding, __VA_ARGS__); } \ + void rlp(::taraxa::util::RLPEncoderRef encoding) const { ::taraxa::util::rlp_tuple(encoding, __VA_ARGS__); } diff --git a/libraries/common/include/common/range_view.hpp b/libraries/common/include/common/range_view.hpp index 5c93b77870..8f6cbf29d4 100644 --- a/libraries/common/include/common/range_view.hpp +++ b/libraries/common/include/common/range_view.hpp @@ -3,7 +3,7 @@ #include #include -namespace taraxa::util::range_view { +namespace taraxa::util { template <typename Element> struct RangeView { @@ -72,9 +72,4 @@ auto make_range_view(Seq const &seq) { return RangeView(seq); } -} // namespace taraxa::util::range_view - -namespace taraxa::util { -using range_view::make_range_view; -using range_view::RangeView; } // namespace taraxa::util diff --git a/libraries/common/src/vrf_wrapper.cpp b/libraries/common/src/vrf_wrapper.cpp index 66606b7ad1..fcf01f4103 100644 --- a/libraries/common/src/vrf_wrapper.cpp +++ b/libraries/common/src/vrf_wrapper.cpp @@ -5,23 +5,25 @@ namespace taraxa::vrf_wrapper { std::pair<vrf_pk_t, vrf_sk_t> getVrfKeyPair() { vrf_sk_t sk; vrf_pk_t pk; - crypto_vrf_keypair((unsigned char *)pk.data(), (unsigned char *)sk.data()); + crypto_vrf_keypair(pk.data(), sk.data()); return {pk, sk}; } vrf_pk_t getVrfPublicKey(vrf_sk_t const &sk) { vrf_pk_t pk; - crypto_vrf_sk_to_pk((unsigned char *)pk.data(), (unsigned char *)sk.data()); + crypto_vrf_sk_to_pk(pk.data(), const_cast<unsigned char *>(sk.data())); return pk; } -bool isValidVrfPublicKey(vrf_pk_t const &pk) { return crypto_vrf_is_valid_key((unsigned char *)pk.data()) == 1; } +bool isValidVrfPublicKey(vrf_pk_t const &pk) { + return crypto_vrf_is_valid_key(const_cast<unsigned char *>(pk.data())) == 1; +} std::optional<vrf_proof_t> getVrfProof(vrf_sk_t const &sk, bytes const &msg) { vrf_proof_t proof; // crypto_vrf_prove return 0 on success! - if (!crypto_vrf_prove((unsigned char *)proof.data(), (const unsigned char *)sk.data(), - (const unsigned char *)msg.data(), msg.size())) { + if (!crypto_vrf_prove(proof.data(), const_cast<unsigned char *>(sk.data()), const_cast<unsigned char *>(msg.data()), + msg.size())) { return proof; } return {}; @@ -30,8 +32,9 @@ std::optional<vrf_output_t> getVrfOutput(vrf_pk_t const &pk, vrf_proof_t const &proof, bytes const &msg) { vrf_output_t output; // crypto_vrf_verify return 0 on success!
diff --git a/libraries/common/include/common/range_view.hpp b/libraries/common/include/common/range_view.hpp
index 5c93b77870..8f6cbf29d4 100644
--- a/libraries/common/include/common/range_view.hpp
+++ b/libraries/common/include/common/range_view.hpp
@@ -3,7 +3,7 @@
 #include
 #include
 
-namespace taraxa::util::range_view {
+namespace taraxa::util {
 
 template <typename Element>
 struct RangeView {
@@ -72,9 +72,4 @@ auto make_range_view(Seq const &seq) {
   return RangeView(seq);
 }
 
-}  // namespace taraxa::util::range_view
-
-namespace taraxa::util {
-using range_view::make_range_view;
-using range_view::RangeView;
 }  // namespace taraxa::util
 
diff --git a/libraries/common/src/vrf_wrapper.cpp b/libraries/common/src/vrf_wrapper.cpp
index 66606b7ad1..fcf01f4103 100644
--- a/libraries/common/src/vrf_wrapper.cpp
+++ b/libraries/common/src/vrf_wrapper.cpp
@@ -5,23 +5,25 @@ namespace taraxa::vrf_wrapper {
 
 std::pair<vrf_pk_t, vrf_sk_t> getVrfKeyPair() {
   vrf_sk_t sk;
   vrf_pk_t pk;
-  crypto_vrf_keypair((unsigned char *)pk.data(), (unsigned char *)sk.data());
+  crypto_vrf_keypair(pk.data(), sk.data());
   return {pk, sk};
 }
 
 vrf_pk_t getVrfPublicKey(vrf_sk_t const &sk) {
   vrf_pk_t pk;
-  crypto_vrf_sk_to_pk((unsigned char *)pk.data(), (unsigned char *)sk.data());
+  crypto_vrf_sk_to_pk(pk.data(), const_cast<unsigned char *>(sk.data()));
   return pk;
 }
 
-bool isValidVrfPublicKey(vrf_pk_t const &pk) { return crypto_vrf_is_valid_key((unsigned char *)pk.data()) == 1; }
+bool isValidVrfPublicKey(vrf_pk_t const &pk) {
+  return crypto_vrf_is_valid_key(const_cast<unsigned char *>(pk.data())) == 1;
+}
 
 std::optional<vrf_proof_t> getVrfProof(vrf_sk_t const &sk, bytes const &msg) {
   vrf_proof_t proof;
   // crypto_vrf_prove return 0 on success!
-  if (!crypto_vrf_prove((unsigned char *)proof.data(), (const unsigned char *)sk.data(),
-                        (const unsigned char *)msg.data(), msg.size())) {
+  if (!crypto_vrf_prove(proof.data(), const_cast<unsigned char *>(sk.data()), const_cast<unsigned char *>(msg.data()),
+                        msg.size())) {
     return proof;
   }
   return {};
@@ -30,8 +32,9 @@ std::optional<vrf_output_t> getVrfOutput(vrf_pk_t const &pk, vrf_proof_t const &proof, bytes const &msg) {
   vrf_output_t output;
   // crypto_vrf_verify return 0 on success!
-  if (!crypto_vrf_verify((unsigned char *)output.data(), (const unsigned char *)pk.data(),
-                         (const unsigned char *)proof.data(), (const unsigned char *)msg.data(), msg.size())) {
+  if (!crypto_vrf_verify(output.data(), const_cast<unsigned char *>(pk.data()),
+                         const_cast<unsigned char *>(proof.data()), const_cast<unsigned char *>(msg.data()),
+                         msg.size())) {
     return output;
   }
   return {};
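A hedged usage sketch of the wrapper API above; the optional-returning calls are empty on failure, so a prove/verify round trip can be written as:

    #include "common/vrf_wrapper.hpp"

    using namespace taraxa::vrf_wrapper;

    bool vrfRoundTrip(const bytes &msg) {
      const auto [pk, sk] = getVrfKeyPair();
      const auto proof = getVrfProof(sk, msg);  // std::nullopt if crypto_vrf_prove fails
      // getVrfOutput re-verifies the proof against pk and msg before returning the output
      return proof && isValidVrfPublicKey(pk) && getVrfOutput(pk, *proof, msg).has_value();
    }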
diff --git a/libraries/config/include/config/config.hpp b/libraries/config/include/config/config.hpp
index 5d8653044a..9a4388d0cb 100644
--- a/libraries/config/include/config/config.hpp
+++ b/libraries/config/include/config/config.hpp
@@ -15,8 +15,8 @@ struct DBConfig {
   uint32_t db_max_open_files = 0;
   PbftPeriod db_revert_to_period = 0;
   bool rebuild_db = false;
+  bool prune_state_db = false;
   PbftPeriod rebuild_db_period = 0;
-  bool rebuild_db_columns = false;
 };
 
 void dec_json(Json::Value const &json, DBConfig &db_config);
 
diff --git a/libraries/config/include/config/hardfork.hpp b/libraries/config/include/config/hardfork.hpp
index e0ca9262a5..f0364b5dbc 100644
--- a/libraries/config/include/config/hardfork.hpp
+++ b/libraries/config/include/config/hardfork.hpp
@@ -5,7 +5,18 @@
 #include "common/encoding_rlp.hpp"
 
 struct Hardforks {
-  uint64_t fix_genesis_fork_block = 0;
+  /*
+   * @brief key is the block number at which the change is applied, value is the new distribution interval.
+   * The default distribution frequency is every block.
+   * To change the rewards distribution frequency we should add a new element to the map below.
+   * For example {{101, 20}, {201, 10}} means:
+   * 1. for blocks [1, 100] we are distributing rewards every block
+   * 2. for blocks [101, 200] rewards are distributed every 20th block, i.e. on blocks 120, 140, etc.
+   * 3. for blocks from 201 on, rewards are distributed every 10th block, i.e. on blocks 210, 220, etc.
+   */
+  using RewardsDistributionMap = std::map<uint64_t, uint32_t>;
+  RewardsDistributionMap rewards_distribution_frequency;
+
   HAS_RLP_FIELDS
 };
 
diff --git a/libraries/config/include/config/state_config.hpp b/libraries/config/include/config/state_config.hpp
index 6cd844094f..15a0a6da18 100644
--- a/libraries/config/include/config/state_config.hpp
+++ b/libraries/config/include/config/state_config.hpp
@@ -61,7 +61,7 @@ struct Config {
   EVMChainConfig evm_chain_config;
   BalanceMap initial_balances;
   DPOSConfig dpos;
-  // Hardforks hardforks;
+  Hardforks hardforks;
 
   HAS_RLP_FIELDS
 };
 
diff --git a/libraries/config/src/config.cpp b/libraries/config/src/config.cpp
index ddf67d0da4..91746e521d 100644
--- a/libraries/config/src/config.cpp
+++ b/libraries/config/src/config.cpp
@@ -98,13 +98,11 @@ FullNodeConfig::FullNodeConfig(const Json::Value &string_or_object, const Json::
   }
 
   is_light_node = getConfigDataAsBoolean(root, {"is_light_node"}, true, is_light_node);
-  if (is_light_node) {
-    const auto min_light_node_history = (genesis.state.dpos.blocks_per_year * kDefaultLightNodeHistoryDays) / 365;
-    light_node_history = getConfigDataAsUInt(root, {"light_node_history"}, true, min_light_node_history);
-    if (light_node_history < min_light_node_history) {
-      throw ConfigException("Min. required light node history is " + std::to_string(min_light_node_history) +
-                            " blocks (" + std::to_string(kDefaultLightNodeHistoryDays) + " days)");
-    }
+  const auto min_light_node_history = (genesis.state.dpos.blocks_per_year * kDefaultLightNodeHistoryDays) / 365;
+  light_node_history = getConfigDataAsUInt(root, {"light_node_history"}, true, min_light_node_history);
+  if (light_node_history < min_light_node_history) {
+    throw ConfigException("Min. required light node history is " + std::to_string(min_light_node_history) +
+                          " blocks (" + std::to_string(kDefaultLightNodeHistoryDays) + " days)");
   }
 
   try {
 
diff --git a/libraries/config/src/hardfork.cpp b/libraries/config/src/hardfork.cpp
index 950558fc19..fc4f804c0e 100644
--- a/libraries/config/src/hardfork.cpp
+++ b/libraries/config/src/hardfork.cpp
@@ -2,14 +2,23 @@
 
 Json::Value enc_json(const Hardforks& obj) {
   Json::Value json(Json::objectValue);
-  json["fix_genesis_fork_block"] = dev::toJS(obj.fix_genesis_fork_block);
+
+  auto& rewards = json["rewards_distribution_frequency"];
+  rewards = Json::objectValue;
+  for (auto i = obj.rewards_distribution_frequency.begin(); i != obj.rewards_distribution_frequency.end(); ++i) {
+    rewards[std::to_string(i->first)] = i->second;
+  }
+
   return json;
 }
 
 void dec_json(const Json::Value& json, Hardforks& obj) {
-  if (auto const& e = json["fix_genesis_fork_block"]) {
-    obj.fix_genesis_fork_block = dev::getUInt(e);
+  if (const auto& e = json["rewards_distribution_frequency"]) {
+    assert(e.isObject());
+
+    for (auto itr = e.begin(); itr != e.end(); ++itr) {
+      obj.rewards_distribution_frequency[itr.key().asUInt64()] = itr->asUInt64();
+    }
   }
 }
 
-RLP_FIELDS_DEFINE(Hardforks, fix_genesis_fork_block)
+RLP_FIELDS_DEFINE(Hardforks, rewards_distribution_frequency)
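The map semantics documented in hardfork.hpp boil down to "use the entry with the greatest key not exceeding the current block". A minimal sketch of that lookup (the helper name is illustrative, not from this diff):

    #include <cstdint>
    #include <map>

    using RewardsDistributionMap = std::map<uint64_t, uint32_t>;

    // Distribution interval in force at block_num; 1 (every block) before the first entry.
    uint32_t distributionFrequencyAt(const RewardsDistributionMap &map, uint64_t block_num) {
      auto it = map.upper_bound(block_num);  // first entry with key > block_num
      return it == map.begin() ? 1 : std::prev(it)->second;
    }
    // With {{101, 20}, {201, 10}}: blocks 1-100 -> 1, 101-200 -> 20, 201+ -> 10.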
diff --git a/libraries/config/src/network.cpp b/libraries/config/src/network.cpp
index f4325fd9dc..3f4a83a73b 100644
--- a/libraries/config/src/network.cpp
+++ b/libraries/config/src/network.cpp
@@ -137,7 +137,7 @@ void dec_json(const Json::Value &json, NetworkConfig &network) {
       getConfigDataAsUInt(json, {"deep_syncing_threshold"}, true, network.deep_syncing_threshold);
   network.ddos_protection = dec_ddos_protection_config_json(getConfigData(json, {"ddos_protection"}));
 
-  for (auto &item : json["boot_nodes"]) {
+  for (const auto &item : json["boot_nodes"]) {
     network.boot_nodes.push_back(dec_json(item));
   }
   auto listen_ip = boost::asio::ip::address::from_string(network.listen_ip);
 
diff --git a/libraries/config/src/state_config.cpp b/libraries/config/src/state_config.cpp
index 29278f78e8..a284a65e3e 100644
--- a/libraries/config/src/state_config.cpp
+++ b/libraries/config/src/state_config.cpp
@@ -22,14 +22,14 @@ void dec_json(const Json::Value& /*json*/, uint64_t chain_id, EVMChainConfig& ob
 void append_json(Json::Value& json, const Config& obj) {
   json["evm_chain_config"] = enc_json(obj.evm_chain_config);
   json["initial_balances"] = enc_json(obj.initial_balances);
-  // json["hardforks"] = enc_json(obj.hardforks);
+  json["hardforks"] = enc_json(obj.hardforks);
   json["dpos"] = enc_json(obj.dpos);
 }
 
 void dec_json(const Json::Value& json, Config& obj) {
   dec_json(json["evm_chain_config"], json["chain_id"].asUInt(), obj.evm_chain_config);
   dec_json(json["initial_balances"], obj.initial_balances);
-  // dec_json(json["hardforks"], obj.hardforks);
+  dec_json(json["hardforks"], obj.hardforks);
   dec_json(json["dpos"], obj.dpos);
 }
 
diff --git a/libraries/core_libs/CMakeLists.txt b/libraries/core_libs/CMakeLists.txt
index 9857d5348e..b5d3266a2c 100644
--- a/libraries/core_libs/CMakeLists.txt
+++ b/libraries/core_libs/CMakeLists.txt
@@ -15,11 +15,15 @@ file(GLOB_RECURSE STORAGE_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/storage/*.cpp)
 file(GLOB_RECURSE NODE_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/node/*.hpp)
 file(GLOB_RECURSE NODE_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/node/*.cpp)
 
+file(GLOB_RECURSE REWARDS_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/rewards/*.hpp)
+file(GLOB_RECURSE REWARDS_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/rewards/*.cpp)
+
 set(HEADERS
     ${CONSENSUS_HEADERS}
     ${NETWORK_HEADERS}
     ${STORAGE_HEADERS}
     ${NODE_HEADERS}
+    ${REWARDS_HEADERS}
 )
 
 set(SOURCES
@@ -28,6 +32,7 @@ set(SOURCES
     ${STORAGE_SOURCES}
     ${NODE_SOURCES}
    ${GRAPHQL_GENERATED_SOURCES}
+    ${REWARDS_SOURCES}
 )
 
 add_library(core_libs ${SOURCES} ${HEADERS})
@@ -36,6 +41,7 @@ target_include_directories(core_libs PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/consensu
 target_include_directories(core_libs PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/network/include)
 target_include_directories(core_libs PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/node/include)
 target_include_directories(core_libs PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/storage/include)
+target_include_directories(core_libs PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/rewards/include)
 target_include_directories(core_libs PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
 # GraphQL
 target_include_directories(core_libs PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/network/graphql/gen)
@@ -47,7 +53,7 @@ target_link_libraries(core_libs PUBLIC
     taraxa-evm
     p2p
     metrics
-    CONAN_PKG::libjson-rpc-cpp
+    Jsonrpccpp
     CONAN_PKG::rocksdb
     # GraphQL
     cppgraphqlgen::graphqlservice
 
diff --git a/libraries/core_libs/consensus/include/final_chain/contract_interface.hpp b/libraries/core_libs/consensus/include/final_chain/contract_interface.hpp
index 9c4820edec..a4b7ddabb3 100644
--- a/libraries/core_libs/consensus/include/final_chain/contract_interface.hpp
+++ b/libraries/core_libs/consensus/include/final_chain/contract_interface.hpp
@@ -6,7 +6,6 @@
 #include "common/types.hpp"
 #include "final_chain/final_chain.hpp"
-#include "final_chain/rewards_stats.hpp"
 
 namespace taraxa::final_chain {
 class ContractInterface {
 
diff --git a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp
index 0d6f111446..f2d7948c1f 100644
--- a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp
+++ b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp
@@ -87,17 +87,6 @@ class FinalChain {
    * @return BlockHash h256
    */
   virtual std::optional<h256> block_hash(std::optional<EthBlockNumber> n = {}) const = 0;
-  struct TransactionHashes {
-    TransactionHashes() = default;
-    virtual ~TransactionHashes() = default;
-    TransactionHashes(const TransactionHashes&) = default;
-    TransactionHashes(TransactionHashes&&) = default;
-    TransactionHashes& operator=(const TransactionHashes&) = default;
-    TransactionHashes& operator=(TransactionHashes&&) = default;
-
-    virtual size_t count() const = 0;
-    virtual h256 get(size_t i) const = 0;
-  };
 
   /**
    * @brief Needed if we are changing params with hardfork and it affects Go part of code. For example DPOS contract
@@ -188,14 +177,14 @@ class FinalChain {
                                  std::optional<EthBlockNumber> blk_n = {}) const = 0;
 
   /**
-   * @brief Trace execution of a new message call immediately without creating a transaction on the block chain. That
+   * @brief Trace execution of new message calls immediately without creating transactions on the blockchain. That
    * means that state would be reverted and not saved anywhere
-   * @param trx state_api::EVMTransaction
+   * @param trxs vector of state_api::EVMTransaction to trace
    * @param blk_n EthBlockNumber number of block we are getting state from
    * @return std::string
    */
-  virtual std::string trace_trx(const state_api::EVMTransaction& trx, EthBlockNumber blk_n,
-                                std::optional<state_api::Tracing> params = {}) const = 0;
+  virtual std::string trace(std::vector<state_api::EVMTransaction> trxs, EthBlockNumber blk_n,
+                            std::optional<state_api::Tracing> params = {}) const = 0;
 
   /**
    * @brief total count of eligible votes are in DPOS precompiled contract
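Call sites migrate by wrapping the single transaction into a vector; a sketch under the new batched signature (variable names are illustrative):

    // Old: std::string out = final_chain->trace_trx(trx, blk_n);
    // New: the API takes a batch; a single transaction becomes a one-element vector.
    std::vector<state_api::EVMTransaction> batch{trx};
    std::string out = final_chain->trace(std::move(batch), blk_n);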
diff --git a/libraries/core_libs/consensus/include/final_chain/state_api.hpp b/libraries/core_libs/consensus/include/final_chain/state_api.hpp
index ae4535db74..c30763f864 100644
--- a/libraries/core_libs/consensus/include/final_chain/state_api.hpp
+++ b/libraries/core_libs/consensus/include/final_chain/state_api.hpp
@@ -5,8 +5,8 @@
 #include
 #include
 
 #include "common/range_view.hpp"
-#include "final_chain/rewards_stats.hpp"
 #include "final_chain/state_api_data.hpp"
+#include "rewards/block_stats.hpp"
 #include "storage/storage.hpp"
 
 namespace taraxa::state_api {
@@ -34,24 +34,19 @@ class StateAPI {
 
   void update_state_config(const Config& new_config);
 
-  Proof prove(EthBlockNumber blk_num, const root_t& state_root, const addr_t& addr,
-              const std::vector& keys) const;
   std::optional<Account> get_account(EthBlockNumber blk_num, const addr_t& addr) const;
   u256 get_account_storage(EthBlockNumber blk_num, const addr_t& addr, const u256& key) const;
   bytes get_code_by_address(EthBlockNumber blk_num, const addr_t& addr) const;
   ExecutionResult dry_run_transaction(EthBlockNumber blk_num, const EVMBlock& blk, const EVMTransaction& trx) const;
-  bytes trace_transaction(EthBlockNumber blk_num, const EVMBlock& blk, const EVMTransaction& trx,
-                          std::optional<Tracing> params = {}) const;
+  bytes trace(EthBlockNumber blk_num, const EVMBlock& blk, const std::vector<EVMTransaction> trxs,
+              std::optional<Tracing> params = {}) const;
   StateDescriptor get_last_committed_state_descriptor() const;
   const StateTransitionResult& transition_state(const EVMBlock& block,
                                                 const util::RangeView<EVMTransaction>& transactions,
-                                                const util::RangeView& transactions_validators = {},
-                                                const util::RangeView& uncles = {},
-                                                const RewardsStats& rewards_stats = {});
+                                                const std::vector<rewards::BlockStats>& rewards_stats = {});
   void transition_state_commit();
   void create_snapshot(PbftPeriod period);
-  void prune(const dev::h256& state_root_to_keep, const std::vector<dev::h256>& state_root_to_prune,
-             EthBlockNumber blk_num);
+  void prune(const std::vector<dev::h256>& state_root_to_keep, EthBlockNumber blk_num);
 
   // DPOS
   uint64_t dpos_eligible_total_vote_count(EthBlockNumber blk_num) const;
 
diff --git a/libraries/core_libs/consensus/include/final_chain/state_api_data.hpp b/libraries/core_libs/consensus/include/final_chain/state_api_data.hpp
index 81f9a07e21..6166e58af4 100644
--- a/libraries/core_libs/consensus/include/final_chain/state_api_data.hpp
+++ b/libraries/core_libs/consensus/include/final_chain/state_api_data.hpp
@@ -92,20 +92,6 @@ struct Account {
   h256 const& storage_root_eth() const;
 } const ZeroAccount;
 
-struct TrieProof {
-  bytes value;
-  std::vector<bytes> nodes;
-
-  HAS_RLP_FIELDS
-};
-
-struct Proof {
-  TrieProof account_proof;
-  std::vector<TrieProof> storage_proofs;
-
-  HAS_RLP_FIELDS
-};
-
 struct StateDescriptor {
   EthBlockNumber blk_num = 0;
   h256 state_root;
 
diff --git a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp
index 5597de9bdf..c73c155118 100644
--- a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp
+++ b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp
@@ -205,7 +205,7 @@ class PbftManager : public std::enable_shared_from_this<PbftManager> {
    * @brief Get PBFT lambda. PBFT lambda is a timer clock
    * @return PBFT lambda
    */
-  std::chrono::milliseconds getPbftInitialLambda() const { return LAMBDA_ms_MIN; }
+  std::chrono::milliseconds getPbftInitialLambda() const { return kMinLambda; }
 
   /**
    * @brief Calculate DAG blocks ordering hash
@@ -253,11 +253,6 @@ class PbftManager : public std::enable_shared_from_this<PbftManager> {
    */
   void resume();
 
-  /**
-   * @brief Resume PBFT daemon on single state. Only to be used for unit tests
-   */
-  void resumeSingleState();
-
   /**
    * @brief Get a proposed PBFT block based on specified period and block hash
    * @param period
@@ -273,18 +268,16 @@ class PbftManager : public std::enable_shared_from_this<PbftManager> {
   size_t getPbftCommitteeSize() const { return config_.committee_size; }
 
   /**
-   * @brief Broadcast or rebroadcast current round soft votes and previous round next votes
-   * @param rebroadcast
+   * @brief Test/enforce broadcastVotes() to actually send votes
    */
-  void broadcastSoftAndNextVotes(bool rebroadcast);
+  void testBroadcastVotesFunctionality();
 
+ private:
   /**
-   * @brief Broadcast or rebroadcast reward votes
-   * @param rebroadcast
+   * @brief Broadcast or rebroadcast 2t+1 soft/reward/previous round next votes + all own votes if needed
    */
-  void broadcastRewardVotes(bool rebroadcast);
+  void broadcastVotes();
 
- private:
   /**
    * @brief Check PBFT blocks syncing queue. If there are synced PBFT blocks in queue, push it to PBFT chain
    */
@@ -336,16 +329,6 @@
    */
   void sleep_();
 
-  /**
-   * @brief Go to next PBFT state. Only to be used for unit tests
-   */
-  void doNextState_();
-
-  /**
-   * @brief Set next PBFT state
-   */
-  void setNextState_();
-
   /**
    * @brief Set PBFT filter state
    */
@@ -471,6 +454,13 @@
    */
   bool validatePbftBlock(const std::shared_ptr<PbftBlock> &pbft_block) const;
 
+  /**
+   * @brief Validates pbft block state root
+   * @param pbft_block PBFT block
+   * @return true if pbft block is valid, otherwise false
+   */
+  bool validatePbftBlockStateRoot(const std::shared_ptr<PbftBlock> &pbft_block) const;
+
   /**
   * @brief If there are enough certify votes, push the vote PBFT block in PBFT chain
   * @param pbft_block PBFT block
@@ -552,24 +542,20 @@ class PbftManager : public std::enable_shared_from_this<PbftManager> {
   const addr_t node_addr_;
   const secret_t node_sk_;
 
-  const std::chrono::milliseconds LAMBDA_ms_MIN;
-  std::chrono::milliseconds LAMBDA_ms{0};
-  uint64_t LAMBDA_backoff_multiple = 1;
+  const std::chrono::milliseconds kMinLambda;  // [ms]
+  std::chrono::milliseconds lambda_{0};        // [ms]
   const std::chrono::milliseconds kMaxLambda{60000};  // in ms, max lambda is 1 minute
 
   const uint32_t kBroadcastVotesLambdaTime = 20;
   const uint32_t kRebroadcastVotesLambdaTime = 60;
-  uint32_t broadcast_soft_next_votes_counter_ = 1;
-  uint32_t rebroadcast_soft_next_votes_counter_ = 1;
+  uint32_t broadcast_votes_counter_ = 1;
+  uint32_t rebroadcast_votes_counter_ = 1;
   uint32_t broadcast_reward_votes_counter_ = 1;
   uint32_t rebroadcast_reward_votes_counter_ = 1;
 
-  std::default_random_engine random_engine_{std::random_device{}()};
-
   PbftStates state_ = value_proposal_state;
   std::atomic<PbftRound> round_ = 1;
   PbftStep step_ = 1;
-  PbftStep startingStepInRound_ = 1;
 
   // Block that node cert voted
   std::optional> cert_voted_block_for_round_{};
@@ -588,6 +574,10 @@ class PbftManager : public std::enable_shared_from_this<PbftManager> {
   bool go_finish_state_ = false;
   bool loop_back_finish_state_ = false;
 
+  // Used to avoid cyclic logging in voting steps that are called repeatedly
+  bool printSecondFinishStepInfo_ = true;
+  bool printCertStepInfo_ = true;
+
   const blk_hash_t dag_genesis_block_hash_;
 
   const PbftConfig &config_;
diff --git a/libraries/core_libs/consensus/include/final_chain/rewards_stats.hpp b/libraries/core_libs/consensus/include/rewards/block_stats.hpp
similarity index 72%
rename from libraries/core_libs/consensus/include/final_chain/rewards_stats.hpp
rename to libraries/core_libs/consensus/include/rewards/block_stats.hpp
index ee925e16db..2e80eff84b 100644
--- a/libraries/core_libs/consensus/include/final_chain/rewards_stats.hpp
+++ b/libraries/core_libs/consensus/include/rewards/block_stats.hpp
@@ -7,29 +7,33 @@
 #include "pbft/period_data.hpp"
 #include "vote/vote.hpp"
 
-namespace taraxa {
+namespace taraxa::rewards {
 
 /**
  * @class RewardsStats
  * @brief RewardsStats contains rewards statistics for single pbft block
 */
-class RewardsStats {
+class BlockStats {
  public:
+  // Needed for RLP
+  BlockStats() = default;
   /**
-   * @brief Process PeriodData and returns vector of validators, who included provided block.transactions as first in
-   * dag block, e.g. returned validator on position 2 included transaction block.transactions[2] as first in his dag
-   * block
+   * @brief Sets block_author_ and max_votes_weight_, then calls the processStats function
    *
-   * @param block
    * @param dpos_vote_count - votes count for previous block
    * @param committee_size
-   * @return vector of validators
    */
-  std::vector<addr_t> processStats(const PeriodData& block, uint64_t dpos_vote_count, uint32_t committee_size);
+  BlockStats(const PeriodData& block, uint64_t dpos_vote_count, uint32_t committee_size);
 
   HAS_RLP_FIELDS
 
 private:
+  /**
+   * @brief Process PeriodData and save stats in the class for future serialization
+   *
+   * @param block
+   */
+  void processStats(const PeriodData& block);
   /**
   * @brief In case unique tx_hash is provided, it is mapped to its validator's address + validator's unique txs count
   * is incremented. If provided tx_hash was already processed, nothing happens
@@ -56,16 +60,7 @@
    */
   bool addVote(const std::shared_ptr<Vote>& vote);
 
-  /**
-   * @brief Prepares reward statistics bases on period data data
-   *
-   * @param sync_blk
-   * @param dpos_vote_count - votes count for previous block
-   * @param committee_size
-   */
-  void initStats(const PeriodData& sync_blk, uint64_t dpos_vote_count, uint32_t committee_size);
-
- private:
+ protected:
  struct ValidatorStats {
    // count of rewardable(with 1 or more unique transactions) DAG blocks produced by this validator
    uint32_t dag_blocks_count_ = 0;
@@ -76,8 +71,15 @@ class RewardsStats {
     HAS_RLP_FIELDS
   };
 
+  // Pbft block author
+  addr_t block_author_;
+
   // Transactions validators: tx hash -> validator that included it as first in his block
-  std::unordered_map<trx_hash_t, addr_t> txs_validators_;
+  std::unordered_map<trx_hash_t, addr_t> validator_by_tx_hash_;
+
+  // Vector with all transactions validators, who included provided block.transactions as first in dag block,
+  // e.g. returned validator on position 2 included transaction block.transactions[2] as first in his dag block
+  std::vector<addr_t> txs_validators_;
 
   // Txs stats: validator -> ValidatorStats
   std::unordered_map<addr_t, ValidatorStats> validators_stats_;
@@ -92,4 +94,4 @@ class RewardsStats {
   uint64_t max_votes_weight_{0};
 };
 
-}  // namespace taraxa
\ No newline at end of file
+}  // namespace taraxa::rewards
\ No newline at end of file
diff --git a/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp b/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp
new file mode 100644
index 0000000000..22fca73188
--- /dev/null
+++ b/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp
@@ -0,0 +1,54 @@
+#include "config/hardfork.hpp"
+#include "rewards/block_stats.hpp"
+#include "storage/storage.hpp"
+
+namespace taraxa::rewards {
+/*
+ * @brief class that manages rewards stats processing and hardforks (interval changes).
+ * Intermediate block stats are stored in a vector and persisted in the db (to restore them on node restart),
+ * and the full list of interval stats is returned at the end of the interval
+ */
+class Stats {
+ public:
+  Stats(uint32_t committee_size, const Hardforks::RewardsDistributionMap& rdm, std::shared_ptr<DbStorage> db,
+        std::function<uint64_t(EthBlockNumber)>&& dpos_eligible_total_vote_count);
+
+  /*
+   * @brief processes the passed block and returns stats that should be processed at this block
+   * @param current_blk block to process
+   * @return vector of stats that should be processed at the current block
+   */
+  std::vector<BlockStats> processStats(const PeriodData& current_blk);
+
+ protected:
+  /*
+   * @brief loads current interval stats from the database
+   */
+  void loadFromDb();
+  /*
+   * @brief returns the rewards distribution frequency for the specified period
+   */
+  uint32_t getCurrentDistributionFrequency(uint64_t current_period) const;
+  /*
+   * @brief gets all needed data and makes(processes) BlockStats
+   * @param current_blk block to process
+   * @return block statistics needed for rewards distribution
+   */
+  BlockStats getBlockStats(const PeriodData& current_blk);
+  /*
+   * @brief saves stats to the database so this data is not lost in case of node restart
+   */
+  void saveBlockStats(uint64_t number, const BlockStats& stats);
+  /*
+   * @brief called at the start of a new rewards interval; clears the blocks_stats_ collection
+   * and removes all data saved in the db column
+   */
+  void clear();
+
+  const uint32_t kCommitteeSize;
+  const Hardforks::RewardsDistributionMap kRewardsDistributionFrequency;
+  std::shared_ptr<DbStorage> db_;
+  const std::function<uint64_t(EthBlockNumber)> dpos_eligible_total_vote_count_;
+  std::vector<BlockStats> blocks_stats_;
+};
+}  // namespace taraxa::rewards
\ No newline at end of file
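To make the interval behaviour concrete: for a distribution frequency of N, processStats is expected to return an empty vector on intermediate blocks and the whole accumulated batch on the interval boundary. A hedged sketch of the calling pattern (mirrors its use in final_chain.cpp further below):

    // rewards_ is a rewards::Stats instance; blk is the PeriodData being finalized.
    std::vector<rewards::BlockStats> stats = rewards_.processStats(blk);
    if (!stats.empty()) {
      // Interval boundary reached: hand the accumulated per-block stats to the
      // state transition so rewards for the whole interval are distributed at once.
    }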
diff --git a/libraries/core_libs/consensus/include/transaction/transaction_queue.hpp b/libraries/core_libs/consensus/include/transaction/transaction_queue.hpp
index 2b1d167989..eff42ef513 100644
--- a/libraries/core_libs/consensus/include/transaction/transaction_queue.hpp
+++ b/libraries/core_libs/consensus/include/transaction/transaction_queue.hpp
@@ -144,7 +144,7 @@ class TransactionQueue {
 
   // If transactions are dropped within last kTransactionOverflowTimeLimit seconds, dag blocks with missing transactions
   // will not be treated as malicious
-  const std::chrono::seconds kTransactionOverflowTimeLimit{300};
+  const std::chrono::seconds kTransactionOverflowTimeLimit{600};
 
   // Limit when non proposable transactions expire
   const size_t kNonProposableTransactionsPeriodExpiryLimit = 10;
 
diff --git a/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp b/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp
index 36f64a1291..ea3b24250b 100644
--- a/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp
+++ b/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp
@@ -19,6 +19,12 @@ struct VerifiedVotes {
 
   // Step votes
   std::map step_votes;
+
+  // Greatest step, for which there is at least t+1 next votes - it is used for lambda exponential backoff: usually,
+  // when the network gets stalled, it is due to lack of 2t+1 voting power and steps keep increasing. When a new node
+  // joins the network, it should catch up with the rest of the nodes asap, so we don't start exponentially backing off
+  // its lambda if its current step is far behind network_t_plus_one_step (at least 1 third of the network is at this
+  // step)
+  PbftStep network_t_plus_one_step{0};
 };
 }  // namespace taraxa
\ No newline at end of file
diff --git a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp
index 8d904c22e0..b387810031 100644
--- a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp
+++ b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp
@@ -96,13 +96,16 @@ class VoteManager {
   std::optional<PbftRound> determineNewRound(PbftPeriod current_pbft_period, PbftRound current_pbft_round);
 
   /**
-   * @brief Replace current reward votes info with new period, round & block hash based on vote
+   * @brief Replace current reward votes with new period, round & block hash based on vote
    *
    * @param period
    * @param round
+   * @param step
    * @param block_hash
+   * @param batch
    */
-  void resetRewardVotesInfo(PbftPeriod period, PbftRound round, const blk_hash_t& block_hash);
+  void resetRewardVotes(PbftPeriod period, PbftRound round, PbftStep step, const blk_hash_t& block_hash,
+                        DbStorage::Batch& batch);
 
   /**
    * @brief Check reward votes for specified pbft block
@@ -115,11 +118,11 @@ class VoteManager {
                        bool copy_votes);
 
   /**
-   * @brief Get reward votes from reward_votes_ with the round during which was the previous block pushed
+   * @brief Get reward votes with the round during which the previous block was pushed
    *
    * @return vector of reward votes
    */
-  std::vector<std::shared_ptr<Vote>> getProposeRewardVotes();
+  std::vector<std::shared_ptr<Vote>> getRewardVotes();
 
   /**
   * @brief Get current reward votes pbft block period
   *
   * @return PbftPeriod
   */
   PbftPeriod getRewardVotesPbftBlockPeriod();
 
+  /**
+   * @brief Saves own verified vote into memory and db
+   *
+   * @param vote
+   */
+  void saveOwnVerifiedVote(const std::shared_ptr<Vote>& vote);
+
+  /**
+   * @return all own verified votes
+   */
+  std::vector<std::shared_ptr<Vote>> getOwnVerifiedVotes();
+
+  /**
+   * @brief Clear own verified votes
+   *
+   * @param write_batch
+   */
+  void clearOwnVerifiedVotes(DbStorage::Batch& write_batch);
+
   /**
   * @brief Place a vote, save it in the verified votes queue, and gossip to peers
   * @param blockhash vote on PBFT block hash
@@ -197,23 +219,19 @@ class VoteManager {
    * @param period
    * @param round
    * @param type
-   * @param peer_filter if specified, get only votes that are unknown for peer
    * @return vector of votes if 2t+1 voted block votes found, otherwise empty vector
    */
-  std::vector<std::shared_ptr<Vote>> getTwoTPlusOneVotedBlockVotes(
-      PbftPeriod period, PbftRound round, TwoTPlusOneVotedBlockType type,
-      const std::shared_ptr& peer_filter = {}) const;
+  std::vector<std::shared_ptr<Vote>> getTwoTPlusOneVotedBlockVotes(PbftPeriod period, PbftRound round,
+                                                                   TwoTPlusOneVotedBlockType type) const;
 
   /**
   * Get all 2t+1 voted block next votes(both for null block as well as specific block) for specific period and round
   *
   * @param period
   * @param round
-   * @param peer_filter if specified, get only votes that are unknown for peer
   * @return vector of next votes if 2t+1 voted block votes found, otherwise empty vector
   */
-  std::vector<std::shared_ptr<Vote>> getAllTwoTPlusOneNextVotes(
-      PbftPeriod period, PbftRound round, const std::shared_ptr& peer_filter = {}) const;
+  std::vector<std::shared_ptr<Vote>> getAllTwoTPlusOneNextVotes(PbftPeriod period, PbftRound round) const;
 
   /**
   * @brief Sets current pbft period & round. It also checks if we don't already have 2t+1 vote bundles (of any type) for
@@ -224,6 +242,17 @@
   * @param pbft_period
   * @param pbft_round
   */
   void setCurrentPbftPeriodAndRound(PbftPeriod pbft_period, PbftRound pbft_round);
 
+  /**
+   * @brief Returns greatest step (in specified period & round), for which there is at least t+1 voting power
+   * from all nodes
+   * @note It is used for triggering lambda exponential backoff
+   *
+   * @param period
+   * @param round
+   * @return greatest network 2t+1 next voting step
+   */
+  PbftStep getNetworkTplusOneNextVotingStep(PbftPeriod period, PbftRound round) const;
+
  private:
  /**
   * @param vote
@@ -273,8 +302,12 @@ class VoteManager {
   blk_hash_t reward_votes_block_hash_;
   PbftRound reward_votes_period_;
   PbftRound reward_votes_round_;
+  std::vector extra_reward_votes_;
   mutable std::shared_mutex reward_votes_info_mutex_;
 
+  // Own votes generated during current period & round
+  std::vector<std::shared_ptr<Vote>> own_verified_votes_;
+
   // Cache for current 2T+1 - >
   // !!! Important: do not access it directly as it is not updated automatically, always call getPbftTwoTPlusOne instead
   // !!!
 
diff --git a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp
index c7d397b5b9..416a8f6906 100644
--- a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp
+++ b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp
@@ -316,10 +316,7 @@ DagBlock DagBlockProposer::createDagBlock(DagFrontier&& frontier, level_t level,
     trx_hashes.push_back(trx->getHash());
   }
 
-  uint64_t block_estimation = 0;
-  for (const auto& e : estimations) {
-    block_estimation += e;
-  }
+  const uint64_t block_estimation = std::accumulate(estimations.begin(), estimations.end(), uint64_t(0));
 
   // If number of tips is over the limit filter by producer and level
   if (frontier.tips.size() > kDagBlockMaxTips || (frontier.tips.size() + 1) > kPbftGasLimit / kDagGasLimit) {
 
diff --git a/libraries/core_libs/consensus/src/dag/dag_manager.cpp b/libraries/core_libs/consensus/src/dag/dag_manager.cpp
index bfe041cdd4..82c91cdda0 100644
--- a/libraries/core_libs/consensus/src/dag/dag_manager.cpp
+++ b/libraries/core_libs/consensus/src/dag/dag_manager.cpp
@@ -287,6 +287,8 @@ void DagManager::clearLightNodeHistory() {
   // Actual history size will be between 100% and 110% of light_node_history_ to avoid deleting on every period
   if (((period_ % (std::max(light_node_history_ / 10, (uint64_t)1)) == 0)) && period_ > light_node_history_ &&
       dag_expiry_level_ > max_levels_per_period_ + 1) {
+    // This will happen at most once a day, so log it with silent severity
+    LOG(log_si_) << "Clear light node history";
     const auto proposal_period = db_->getProposalPeriodForDagLevel(dag_expiry_level_ - max_levels_per_period_ - 1);
     assert(proposal_period);
 
@@ -299,6 +301,7 @@ void DagManager::clearLightNodeHistory() {
                  << " *proposal_period " << *proposal_period;
     LOG(log_tr_) << "Delete period history from: " << start << " to " << end;
     db_->clearPeriodDataHistory(end);
+    LOG(log_si_) << "Clear light node history completed";
   }
 }
"rewards/rewards_stats.hpp" #include "vote/vote.hpp" namespace taraxa::final_chain { class FinalChainImpl final : public FinalChain { std::shared_ptr db_; - const uint32_t kCommitteeSize; const uint64_t kBlockGasLimit; StateAPI state_api_; const bool kLightNode = false; const uint64_t kLightNodeHistory = 0; const uint32_t kMaxLevelsPerPeriod; + const uint32_t kRewardsDistributionInterval = 100; + rewards::Stats rewards_; // It is not prepared to use more then 1 thread. Examine it if you want to change threads count boost::asio::thread_pool executor_thread_{1}; @@ -49,7 +50,6 @@ class FinalChainImpl final : public FinalChain { public: FinalChainImpl(const std::shared_ptr& db, const taraxa::FullNodeConfig& config, const addr_t& node_addr) : db_(db), - kCommitteeSize(config.genesis.pbft.committee_size), kBlockGasLimit(config.genesis.pbft.gas_limit), state_api_([this](auto n) { return block_hash(n).value_or(ZeroHash()); }, // config.genesis.state, config.opts_final_chain, @@ -59,6 +59,8 @@ class FinalChainImpl final : public FinalChain { kLightNode(config.is_light_node), kLightNodeHistory(config.light_node_history), kMaxLevelsPerPeriod(config.max_levels_per_period), + rewards_(config.genesis.pbft.committee_size, config.genesis.state.hardforks.rewards_distribution_frequency, db_, + [this](EthBlockNumber n) { return dpos_eligible_total_vote_count(n); }), block_headers_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return get_block_header(blk); }), block_hashes_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return get_block_hash(blk); }), @@ -119,6 +121,13 @@ class FinalChainImpl final : public FinalChain { } delegation_delay_ = config.genesis.state.dpos.delegation_delay; + const auto kPruneblocksToKeep = kDagExpiryLevelLimit + kMaxLevelsPerPeriod + 1; + if ((config.db_config.prune_state_db || kLightNode) && last_blk_num.has_value() && + *last_blk_num > kPruneblocksToKeep) { + LOG(log_si_) << "Pruning state db, this might take several minutes"; + prune(*last_blk_num - kPruneblocksToKeep); + LOG(log_si_) << "Pruning state db complete"; + } } void stop() override { executor_thread_.join(); } @@ -142,14 +151,7 @@ class FinalChainImpl final : public FinalChain { std::shared_ptr&& anchor) { auto batch = db_->createWriteBatch(); - RewardsStats rewards_stats; - uint64_t dpos_vote_count = kCommitteeSize; - // Block zero - if (!new_blk.previous_block_cert_votes.empty()) [[unlikely]] { - dpos_vote_count = dpos_eligible_total_vote_count(new_blk.previous_block_cert_votes[0]->getPeriod() - 1); - } - // returns list of validators for new_blk.transactions - const std::vector txs_validators = rewards_stats.processStats(new_blk, dpos_vote_count, kCommitteeSize); + auto rewards_stats = rewards_.processStats(new_blk); block_applying_emitter_.emit(block_header()->number + 1); @@ -173,7 +175,7 @@ class FinalChainImpl final : public FinalChain { auto const& [exec_results, state_root, total_reward] = state_api_.transition_state({new_blk.pbft_blk->getBeneficiary(), kBlockGasLimit, new_blk.pbft_blk->getTimestamp(), BlockHeader::difficulty()}, - to_state_api_transactions(new_blk.transactions), txs_validators, {}, rewards_stats); + to_state_api_transactions(new_blk.transactions), rewards_stats); TransactionReceipts receipts; receipts.reserve(exec_results.size()); @@ -181,9 +183,9 @@ class FinalChainImpl final : public FinalChain { for (auto const& r : exec_results) { LogEntries logs; logs.reserve(r.logs.size()); - for (auto const& l : r.logs) { - logs.emplace_back(LogEntry{l.address, 
l.topics, l.data}); - } + std::transform(r.logs.cbegin(), r.logs.cend(), std::back_inserter(logs), [](const auto& l) { + return LogEntry{l.address, l.topics, l.data}; + }); receipts.emplace_back(TransactionReceipt{ r.code_err.empty() && r.consensus_err.empty(), r.gas_used, @@ -237,34 +239,32 @@ class FinalChainImpl final : public FinalChain { state_api_.create_snapshot(blk_header->number); } - if (kLightNode) { - // Actual history size will be between 100% and 105% of light_node_history_ to avoid deleting on every period - if (((blk_header->number % (std::max(kLightNodeHistory / 20, (uint64_t)1)) == 0)) && - blk_header->number > kLightNodeHistory) { - prune(blk_header->number - kLightNodeHistory); - } - } return result; } void prune(EthBlockNumber blk_n) override { - std::vector state_root_to_prune; - const auto last_block_to_keep = get_block_header(blk_n); + LOG(log_nf_) << "Pruning data older than " << blk_n; + auto last_block_to_keep = get_block_header(blk_n); if (last_block_to_keep) { - LOG(log_nf_) << "Pruning data older than " << blk_n; + auto block_to_keep = last_block_to_keep; + std::vector state_root_to_keep; + while (block_to_keep) { + state_root_to_keep.push_back(block_to_keep->state_root); + block_to_keep = get_block_header(block_to_keep->number + 1); + } auto block_to_prune = get_block_header(last_block_to_keep->number - 1); while (block_to_prune && block_to_prune->number > 0) { - state_root_to_prune.push_back(block_to_prune->state_root); db_->remove(DB::Columns::final_chain_blk_by_number, block_to_prune->number); db_->remove(DB::Columns::final_chain_blk_hash_by_number, block_to_prune->number); db_->remove(DB::Columns::final_chain_blk_number_by_hash, block_to_prune->hash); block_to_prune = get_block_header(block_to_prune->number - 1); } - state_api_.prune(last_block_to_keep->state_root, state_root_to_prune, last_block_to_keep->number); db_->compactColumn(DB::Columns::final_chain_blk_by_number); db_->compactColumn(DB::Columns::final_chain_blk_hash_by_number); db_->compactColumn(DB::Columns::final_chain_blk_number_by_hash); + + state_api_.prune(state_root_to_keep, last_block_to_keep->number); } } @@ -309,16 +309,8 @@ class FinalChainImpl final : public FinalChain { chunk_to_alter[index % c_bloomIndexSize] |= log_bloom_for_index; db_->insert(batch, DB::Columns::final_chain_log_blooms_index, chunk_id, util::rlp_enc(rlp_strm, chunk_to_alter)); } - TransactionLocation tl{blk_header.number}; - for (auto const& trx : transactions) { - db_->insert(batch, DB::Columns::final_chain_transaction_location_by_hash, trx->getHash(), - util::rlp_enc(rlp_strm, tl)); - ++tl.index; - } db_->insert(batch, DB::Columns::final_chain_transaction_hashes_by_blk_number, blk_header.number, - TransactionHashesImpl::serialize_from_transactions(transactions)); - db_->insert(batch, DB::Columns::final_chain_transaction_count_by_blk_number, blk_header.number, - transactions.size()); + dev::rlp(hashes_from_transactions(transactions))); db_->insert(batch, DB::Columns::final_chain_blk_hash_by_number, blk_header.number, blk_header.hash); db_->insert(batch, DB::Columns::final_chain_blk_number_by_hash, blk_header.hash, blk_header.number); db_->insert(batch, DB::Columns::final_chain_meta, DBMetaKeys::LAST_NUMBER, blk_header.number); @@ -343,14 +335,12 @@ class FinalChainImpl final : public FinalChain { return block_headers_cache_.get(*n); } - std::optional transaction_location(h256 const& trx_hash) const override { - auto raw = db_->lookup(trx_hash, DB::Columns::final_chain_transaction_location_by_hash); - if 
(raw.empty()) { + std::optional transaction_location(const h256& trx_hash) const override { + const auto period = db_->getTransactionPeriod(trx_hash); + if (!period) { return {}; } - TransactionLocation ret; - ret.rlp(dev::RLP(raw)); - return ret; + return TransactionLocation{period->first, period->second}; } std::optional transaction_receipt(h256 const& trx_h) const override { @@ -364,8 +354,7 @@ class FinalChainImpl final : public FinalChain { } uint64_t transactionCount(std::optional n = {}) const override { - return db_->lookup_int(last_if_absent(n), DB::Columns::final_chain_transaction_count_by_blk_number) - .value_or(0); + return db_->getTransactionCount(last_if_absent(n)); } std::shared_ptr transaction_hashes(std::optional n = {}) const override { @@ -419,17 +408,17 @@ class FinalChainImpl final : public FinalChain { trx); } - std::string trace_trx(const state_api::EVMTransaction& trx, EthBlockNumber blk_n, - std::optional params = {}) const override { + std::string trace(std::vector trxs, EthBlockNumber blk_n, + std::optional params = {}) const override { const auto blk_header = block_header(last_if_absent(blk_n)); - return dev::asString(state_api_.trace_transaction(blk_header->number, - { - blk_header->author, - blk_header->gas_limit, - blk_header->timestamp, - BlockHeader::difficulty(), - }, - trx, params)); + return dev::asString(state_api_.trace(blk_header->number, + { + blk_header->author, + blk_header->gas_limit, + blk_header->timestamp, + BlockHeader::difficulty(), + }, + trxs, params)); } uint64_t dpos_eligible_total_vote_count(EthBlockNumber blk_num) const override { @@ -449,17 +438,18 @@ class FinalChainImpl final : public FinalChain { } private: - std::shared_ptr get_transaction_hashes(std::optional n = {}) const { - return make_shared( - db_->lookup(last_if_absent(n), DB::Columns::final_chain_transaction_hashes_by_blk_number)); + std::shared_ptr get_transaction_hashes(std::optional n = {}) const { + auto res = db_->lookup(last_if_absent(n), DB::Columns::final_chain_transaction_hashes_by_blk_number); + + return std::make_shared(util::rlp_dec(dev::RLP(res))); } const SharedTransactions get_transactions(std::optional n = {}) const { SharedTransactions ret; auto hashes = transaction_hashes(n); - ret.reserve(hashes->count()); + ret.reserve(hashes->size()); for (size_t i = 0; i < ret.capacity(); ++i) { - auto trx = db_->getTransaction(hashes->get(i)); + auto trx = db_->getTransaction(hashes->at(i)); assert(trx); ret.emplace_back(trx); } @@ -525,31 +515,6 @@ class FinalChainImpl final : public FinalChain { } return ret; } - - struct TransactionHashesImpl : TransactionHashes { - string serialized_; - size_t count_; - - explicit TransactionHashesImpl(string serialized) - : serialized_(std::move(serialized)), count_(serialized_.size() / h256::size) {} - - static bytes serialize_from_transactions(SharedTransactions const& transactions) { - bytes serialized; - serialized.reserve(transactions.size() * h256::size); - for (auto const& trx : transactions) { - for (auto b : trx->getHash()) { - serialized.push_back(b); - } - } - return serialized; - } - - h256 get(size_t i) const override { - return h256((uint8_t*)(serialized_.data() + i * h256::size), h256::ConstructFromPointer); - } - - size_t count() const override { return count_; } - }; }; std::shared_ptr NewFinalChain(const std::shared_ptr& db, const taraxa::FullNodeConfig& config, diff --git a/libraries/core_libs/consensus/src/final_chain/state_api.cpp b/libraries/core_libs/consensus/src/final_chain/state_api.cpp index 
index cbf46340e7..3e29cf8b97 100644
--- a/libraries/core_libs/consensus/src/final_chain/state_api.cpp
+++ b/libraries/core_libs/consensus/src/final_chain/state_api.cpp
@@ -21,7 +21,7 @@ void from_rlp(taraxa_evm_Bytes b, Result& result) {
   util::rlp(dev::RLP(map_bytes(b), 0), result);
 }
 
-void to_str(taraxa_evm_Bytes b, string& result) { result = {(char*)b.Data, b.Len}; }
+void to_str(taraxa_evm_Bytes b, string& result) { result = {reinterpret_cast<char*>(b.Data), b.Len}; }
 
 void to_bytes(taraxa_evm_Bytes b, bytes& result) { result.assign(b.Data, b.Data + b.Len); }
 
@@ -31,7 +31,7 @@ template
 taraxa_evm_BytesCallback decoder_cb_c(Result& res) {
   return {
       &res,
-      [](auto receiver, auto b) { decode(b, *(Result*)receiver); },
+      [](auto receiver, auto b) { decode(b, *static_cast<Result*>(receiver)); },
   };
 }
@@ -142,11 +142,6 @@ void StateAPI::update_state_config(const Config& new_config) {
   err_h.check();
 }
 
-Proof StateAPI::prove(EthBlockNumber blk_num, const root_t& state_root, const addr_t& addr,
-                      const std::vector& keys) const {
-  return c_method_args_rlp(this_c_, blk_num, state_root, addr, keys);
-}
-
 std::optional<Account> StateAPI::get_account(EthBlockNumber blk_num, const addr_t& addr) const {
   return c_method_args_rlp<std::optional<Account>, from_rlp, taraxa_evm_state_api_get_account>(this_c_, blk_num, addr);
 }
@@ -165,14 +160,10 @@
                            trx);
 }
 
-bytes StateAPI::trace_transaction(EthBlockNumber blk_num, const EVMBlock& blk, const EVMTransaction& trx,
-                                  std::optional<Tracing> params) const {
-  if (params) {
-    return c_method_args_rlp(this_c_, blk_num, blk, trx,
-                             *params);
-  } else {
-    return c_method_args_rlp(this_c_, blk_num, blk, trx);
-  }
+bytes StateAPI::trace(EthBlockNumber blk_num, const EVMBlock& blk, const std::vector<EVMTransaction> trxs,
+                      std::optional<Tracing> params) const {
+  return c_method_args_rlp(this_c_, blk_num, blk, trxs,
+                           params);
 }
 
 StateDescriptor StateAPI::get_last_committed_state_descriptor() const {
@@ -186,14 +177,11 @@
 
 const StateTransitionResult& StateAPI::transition_state(const EVMBlock& block,
                                                         const util::RangeView<EVMTransaction>& transactions,
-                                                        const util::RangeView& transactions_validators,
-                                                        const util::RangeView& uncles,
-                                                        const RewardsStats& rewards_stats) {
+                                                        const std::vector<rewards::BlockStats>& rewards_stats) {
   result_buf_transition_state_.execution_results.clear();
   rlp_enc_transition_state_.clear();
   c_method_args_rlp(
-      this_c_, rlp_enc_transition_state_, result_buf_transition_state_, block, transactions, transactions_validators,
-      uncles, rewards_stats);
+      this_c_, rlp_enc_transition_state_, result_buf_transition_state_, block, transactions, rewards_stats);
   return result_buf_transition_state_;
 }
@@ -213,9 +201,8 @@ void StateAPI::create_snapshot(PbftPeriod period) {
   err_h.check();
 }
 
-void StateAPI::prune(const dev::h256& state_root_to_keep, const std::vector<dev::h256>& state_root_to_prune,
-                     EthBlockNumber blk_num) {
-  return c_method_args_rlp(this_c_, state_root_to_keep, state_root_to_prune, blk_num);
+void StateAPI::prune(const std::vector<dev::h256>& state_root_to_keep, EthBlockNumber blk_num) {
+  return c_method_args_rlp(this_c_, state_root_to_keep, blk_num);
 }
 
 uint64_t StateAPI::dpos_eligible_total_vote_count(EthBlockNumber blk_num) const {
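The static_cast touched above is the usual C-callback trampoline: a void* receiver paired with a capture-less lambda that converts to a plain function pointer. A self-contained sketch of the idiom (types are illustrative, not the taraxa-evm ones):

    #include <cstddef>
    #include <string>

    struct CBytes { const char *data; size_t len; };
    struct CBytesCallback { void *receiver; void (*fn)(void *receiver, CBytes b); };

    void demo() {
      std::string result;
      // The capture-less lambda decays to a function pointer; the receiver round-trips
      // through void* and is cast back to its concrete type inside the callback.
      CBytesCallback cb{&result, [](void *receiver, CBytes b) {
        static_cast<std::string *>(receiver)->assign(b.data, b.len);
      }};
      cb.fn(cb.receiver, CBytes{"hi", 2});
    }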
diff --git a/libraries/core_libs/consensus/src/final_chain/state_api_data.cpp b/libraries/core_libs/consensus/src/final_chain/state_api_data.cpp
index 17a84e3593..e41715dd2e 100644
--- a/libraries/core_libs/consensus/src/final_chain/state_api_data.cpp
+++ b/libraries/core_libs/consensus/src/final_chain/state_api_data.cpp
@@ -18,8 +18,6 @@ RLP_FIELDS_DEFINE(LogRecord, address, topics, data)
 RLP_FIELDS_DEFINE(ExecutionResult, code_retval, new_contract_addr, logs, gas_used, code_err, consensus_err)
 RLP_FIELDS_DEFINE(StateTransitionResult, execution_results, state_root, total_reward)
 RLP_FIELDS_DEFINE(Account, nonce, balance, storage_root_hash, code_hash, code_size)
-RLP_FIELDS_DEFINE(TrieProof, value, nodes)
-RLP_FIELDS_DEFINE(Proof, account_proof, storage_proofs)
 RLP_FIELDS_DEFINE(StateDescriptor, blk_num, state_root)
 RLP_FIELDS_DEFINE(Tracing, vmTrace, trace, stateDiff)
 }  // namespace taraxa::state_api
\ No newline at end of file
diff --git a/libraries/core_libs/consensus/src/key_manager/key_manager.cpp b/libraries/core_libs/consensus/src/key_manager/key_manager.cpp
index fe1464e31c..220707fc2f 100644
--- a/libraries/core_libs/consensus/src/key_manager/key_manager.cpp
+++ b/libraries/core_libs/consensus/src/key_manager/key_manager.cpp
@@ -19,7 +19,7 @@ std::shared_ptr<vrf_pk_t> KeyManager::get(EthBlockNumber blk_n, con
       std::unique_lock lock(mutex_);
       return key_map_.insert_or_assign(addr, std::make_shared<vrf_pk_t>(std::move(key))).first->second;
     }
-  } catch (state_api::ErrFutureBlock& e) {
+  } catch (state_api::ErrFutureBlock&) {
     return nullptr;
   }
 
diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp
index 6cf167aecc..e02c55c8fd 100644
--- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp
+++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp
@@ -41,7 +41,7 @@ PbftManager::PbftManager(const PbftConfig &conf, const blk_hash_t &dag_genesis_b
       final_chain_(std::move(final_chain)),
       node_addr_(std::move(node_addr)),
       node_sk_(std::move(node_sk)),
-      LAMBDA_ms_MIN(conf.lambda_ms),
+      kMinLambda(conf.lambda_ms),
       dag_genesis_block_hash_(dag_genesis_block_hash),
       config_(conf),
       proposed_blocks_(db_) {
@@ -124,65 +124,6 @@ void PbftManager::resume() {
   daemon_ = std::make_unique<std::thread>([this]() { run(); });
 }
 
-// Only to be used for tests...
-void PbftManager::resumeSingleState() {
-  if (!stopped_.load()) daemon_->join();
-  stopped_ = false;
-
-  if (step_ == 1) {
-    state_ = value_proposal_state;
-  } else if (step_ == 2) {
-    state_ = filter_state;
-  } else if (step_ == 3) {
-    state_ = certify_state;
-  } else if (step_ % 2 == 0) {
-    state_ = finish_state;
-  } else {
-    state_ = finish_polling_state;
-  }
-
-  doNextState_();
-}
-
-// Only to be used for tests...
-void PbftManager::doNextState_() {
-  auto initial_state = state_;
-
-  while (!stopped_ && state_ == initial_state) {
-    if (stateOperations_()) {
-      continue;
-    }
-
-    // PBFT states
-    switch (state_) {
-      case value_proposal_state:
-        proposeBlock_();
-        break;
-      case filter_state:
-        identifyBlock_();
-        break;
-      case certify_state:
-        certifyBlock_();
-        break;
-      case finish_state:
-        firstFinish_();
-        break;
-      case finish_polling_state:
-        secondFinish_();
-        break;
-      default:
-        LOG(log_er_) << "Unknown PBFT state " << state_;
-        assert(false);
-    }
-
-    setNextState_();
-    if (state_ != initial_state) {
-      return;
-    }
-    sleep_();
-  }
-}
-
 /* When a node starts up it has to sync to the current phase (type of block
  * being generated) and step (within the block generation round)
 * Five step loop for block generation over three phases of blocks
@@ -200,25 +141,41 @@ void PbftManager::run() {
     switch (state_) {
       case value_proposal_state:
        proposeBlock_();
+        setFilterState_();
        break;
      case filter_state:
        identifyBlock_();
+        setCertifyState_();
        break;
      case certify_state:
        certifyBlock_();
+        if (go_finish_state_) {
+          setFinishState_();
+        } else {
+          next_step_time_ms_ += kPollingIntervalMs;
+        }
        break;
      case finish_state:
        firstFinish_();
+        setFinishPollingState_();
        break;
      case finish_polling_state:
        secondFinish_();
+        if (loop_back_finish_state_) {
+          loopBackFinishState_();
+
+          // Print voting summary for current round
+          printVotingSummary();
+        } else {
+          next_step_time_ms_ += kPollingIntervalMs;
+        }
        break;
      default:
        LOG(log_er_) << "Unknown PBFT state " << state_;
        assert(false);
    }
 
-    setNextState_();
+    LOG(log_tr_) << "next step time(ms): " << next_step_time_ms_.count() << ", step " << step_;
    sleep_();
  }
 }
@@ -288,31 +245,44 @@ void PbftManager::setPbftStep(PbftStep pbft_step) {
   db_->savePbftMgrField(PbftMgrField::Step, pbft_step);
   step_ = pbft_step;
 
-  if (step_ > kMaxSteps && LAMBDA_backoff_multiple < 8) {
-    // Note: We calculate the lambda for a step independently of prior steps
-    // in case missed earlier steps.
-    std::uniform_int_distribution<> distribution(0, step_ - kMaxSteps);
-    auto lambda_random_count = distribution(random_engine_);
-    LAMBDA_backoff_multiple = 2 * LAMBDA_backoff_multiple;
-    LAMBDA_ms = LAMBDA_ms_MIN * (LAMBDA_backoff_multiple + lambda_random_count);
-    if (LAMBDA_ms > kMaxLambda) {
-      LAMBDA_ms = kMaxLambda;
-    }
+  // Increase lambda only for odd steps (second finish steps) after node reached kMaxSteps steps
+  if (step_ > kMaxSteps && step_ % 2) {
+    const auto [round, period] = getPbftRoundAndPeriod();
+    const auto network_next_voting_step = vote_mgr_->getNetworkTplusOneNextVotingStep(period, round);
+
+    // Node is still >= kMaxSteps steps behind the rest (at least 1/3) of the network - keep lambda at the standard
+    // value so the node can catch up with the rest of the nodes
+
+    // To get within 1 round of the rest of the network, the node cannot start exponentially backing off its lambda
+    // exactly when it is kMaxSteps behind the network, as it would reach kMaxLambda lambda time before catching up. If
+    // we delay triggering exponential backoff by 4 steps, the node should get within 1 round of the network.
+    // !!! Important: This is true only for values kMinLambda = 15000ms and kMaxLambda = 60000ms
+    if (network_next_voting_step > step_ && network_next_voting_step - step_ >= kMaxSteps - 4 /* hardcoded delay */) {
+      // Reset it only if it was already increased compared to default value
+      if (lambda_ != kMinLambda) {
+        lambda_ = kMinLambda;
+        LOG(log_nf_) << "Node is " << network_next_voting_step - step_
+                     << " steps behind the rest of the network. Reset lambda to the default value " << lambda_.count()
+                     << " [ms]";
+      }
+    } else if (lambda_ < kMaxLambda) {
+      // Node is < kMaxSteps steps behind the rest (at least 1/3) of the network - start exponentially backing off
+      // lambda until it reaches kMaxLambda
+      // Note: We calculate the lambda for a step independently of prior steps in case missed earlier steps.
+      lambda_ *= 2;
+      if (lambda_ > kMaxLambda) {
+        lambda_ = kMaxLambda;
+      }
 
-    LOG(log_dg_) << "Surpassed max steps, exponentially backing off lambda to " << LAMBDA_ms.count() << " ms in round "
-                 << getPbftRound() << ", step " << step_;
-  } else {
-    LAMBDA_ms = LAMBDA_ms_MIN;
-    LAMBDA_backoff_multiple = 1;
+      LOG(log_nf_) << "No round progress - exponentially backing off lambda to " << lambda_.count() << " [ms] in step "
+                   << step_;
+    }
   }
 }
 
 void PbftManager::resetStep() {
   step_ = 1;
-  startingStepInRound_ = 1;
-
-  LAMBDA_ms = LAMBDA_ms_MIN;
-  LAMBDA_backoff_multiple = 1;
+  lambda_ = kMinLambda;
 }
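Isolated, the new backoff rule is: on odd steps past kMaxSteps, either reset lambda to the minimum (when the node is far behind the network's t+1 next-voting step) or double it with saturation at kMaxLambda. A condensed free-function sketch of that decision (constants as in the surrounding code):

    #include <algorithm>
    #include <chrono>
    #include <cstdint>

    std::chrono::milliseconds nextLambda(std::chrono::milliseconds lambda,
                                         std::chrono::milliseconds k_min, std::chrono::milliseconds k_max,
                                         uint32_t step, uint32_t network_step, uint32_t k_max_steps) {
      // Far behind the rest of the network: keep the default lambda so the node catches up.
      if (network_step > step && network_step - step >= k_max_steps - 4) return k_min;
      // Otherwise exponentially back off, saturating at k_max.
      return std::min(lambda * 2, k_max);
    }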
 bool PbftManager::tryPushCertVotesBlock() {
@@ -331,6 +301,11 @@ bool PbftManager::tryPushCertVotesBlock() {
   auto pbft_block = getValidPbftProposedBlock(current_pbft_period, certified_block_hash);
   if (!pbft_block) {
     LOG(log_er_) << "Invalid certified block " << certified_block_hash;
+    auto net = network_.lock();
+    // If block/reward votes are missing but block is cert voted, other nodes probably advanced - sync
+    if (net) {
+      net->restartSyncingPbft();
+    }
     return false;
   }
 
@@ -391,8 +366,8 @@ void PbftManager::resetPbftConsensus(PbftRound round) {
   LOG(log_dg_) << "Reset PBFT consensus to: period " << getPbftPeriod() << ", round " << round << ", step 1";
 
   // Reset broadcast counters
-  broadcast_soft_next_votes_counter_ = 1;
-  rebroadcast_soft_next_votes_counter_ = 1;
+  broadcast_votes_counter_ = 1;
+  rebroadcast_votes_counter_ = 1;
 
   // Update current round and reset step to 1
   round_ = round;
@@ -412,8 +387,8 @@ void PbftManager::resetPbftConsensus(PbftRound round) {
     cert_voted_block_for_round_.reset();
   }
 
-  // Remove all own votes generated in previous round
-  db_->clearOwnVerifiedVotes(batch);
+  // Clear all own votes generated in previous round
+  vote_mgr_->clearOwnVerifiedVotes(batch);
 
   db_->commitWriteBatch(batch);
 
@@ -460,7 +435,7 @@ void PbftManager::initialState() {
   // Initial PBFT state
 
   // Time constants...
-  LAMBDA_ms = LAMBDA_ms_MIN;
+  lambda_ = kMinLambda;
 
   const auto current_pbft_period = getPbftPeriod();
   const auto current_pbft_round = db_->getPbftMgrField(PbftMgrField::Round);
@@ -486,8 +461,6 @@ void PbftManager::initialState() {
     assert(false);
   }
 
-  // This is used to offset endtime for second finishing step...
-  startingStepInRound_ = current_pbft_step;
   setPbftStep(current_pbft_step);
   round_ = current_pbft_round;
@@ -539,55 +512,24 @@ void PbftManager::initialState() {
                             : "no value");
 }
 
-void PbftManager::setNextState_() {
-  switch (state_) {
-    case value_proposal_state:
-      setFilterState_();
-      break;
-    case filter_state:
-      setCertifyState_();
-      break;
-    case certify_state:
-      if (go_finish_state_) {
-        setFinishState_();
-      } else {
-        next_step_time_ms_ += kPollingIntervalMs;
-      }
-      break;
-    case finish_state:
-      setFinishPollingState_();
-      break;
-    case finish_polling_state:
-      if (loop_back_finish_state_) {
-        loopBackFinishState_();
-      } else {
-        next_step_time_ms_ += kPollingIntervalMs;
-      }
-      break;
-    default:
-      LOG(log_er_) << "Unknown PBFT state " << state_;
-      assert(false);
-  }
-  LOG(log_tr_) << "next step time(ms): " << next_step_time_ms_.count() << ", step " << step_;
-}
-
 void PbftManager::setFilterState_() {
   state_ = filter_state;
   setPbftStep(step_ + 1);
-  next_step_time_ms_ = 2 * LAMBDA_ms;
+  next_step_time_ms_ = 2 * lambda_;
 }
 
 void PbftManager::setCertifyState_() {
   state_ = certify_state;
   setPbftStep(step_ + 1);
-  next_step_time_ms_ = 2 * LAMBDA_ms;
+  next_step_time_ms_ = 2 * lambda_;
+  printCertStepInfo_ = true;
 }
 
 void PbftManager::setFinishState_() {
   LOG(log_dg_) << "Will go to first finish State";
   state_ = finish_state;
   setPbftStep(step_ + 1);
-  next_step_time_ms_ = 4 * LAMBDA_ms;
+  next_step_time_ms_ = 4 * lambda_;
 }
 
 void PbftManager::setFinishPollingState_() {
@@ -599,7 +541,9 @@ void PbftManager::setFinishPollingState_() {
   db_->commitWriteBatch(batch);
   already_next_voted_value_ = false;
   already_next_voted_null_block_hash_ = false;
+  printSecondFinishStepInfo_ = true;
   second_finish_step_start_datetime_ = std::chrono::system_clock::now();
+  next_step_time_ms_ += kPollingIntervalMs;
 }
 
 void PbftManager::loopBackFinishState_() {
@@ -611,56 +555,92 @@ void PbftManager::loopBackFinishState_() {
   db_->commitWriteBatch(batch);
   already_next_voted_value_ = false;
   already_next_voted_null_block_hash_ = false;
-  assert(step_ >= startingStepInRound_);
   next_step_time_ms_ += kPollingIntervalMs;
-
-  // Print voting summary for current round
-  printVotingSummary();
 }
 
-void PbftManager::broadcastSoftAndNextVotes(bool rebroadcast) {
+void PbftManager::broadcastVotes() {
   auto net = network_.lock();
   if (!net) {
+    LOG(log_er_) << "Unable to broadcast votes -> can't obtain net ptr";
     return;
   }
 
-  auto [round, period] = getPbftRoundAndPeriod();
+  const auto votes_sync_packet_handler = net->getSpecificHandler<network::tarcap::VotesSyncPacketHandler>();
 
-  // Broadcast 2t+1 soft votes
-  auto soft_votes = vote_mgr_->getTwoTPlusOneVotedBlockVotes(period, round, TwoTPlusOneVotedBlockType::SoftVotedBlock);
-  if (!soft_votes.empty()) {
-    LOG(log_dg_) << "Broadcast soft votes for period " << period << ", round " << round;
-    net->getSpecificHandler<network::tarcap::VotesSyncPacketHandler>()->onNewPbftVotesBundle(std::move(soft_votes),
-                                                                                             rebroadcast);
-  }
+  // Send votes to the other peers
+  auto gossipVotes = [this, &votes_sync_packet_handler](std::vector<std::shared_ptr<Vote>> &&votes,
+                                                        const std::string &votes_type_str, bool rebroadcast) {
+    if (!votes.empty()) {
+      LOG(log_dg_) << "Broadcast " << votes_type_str << " for period " << votes.back()->getPeriod() << ", round "
+                   << votes.back()->getRound();
+      votes_sync_packet_handler->onNewPbftVotesBundle(std::move(votes), rebroadcast);
+    }
+  };
 
-  // Broadcast previous round 2t+1 next votes
-  if (round > 1) {
-    if (auto next_votes = vote_mgr_->getAllTwoTPlusOneNextVotes(period, round - 1); !next_votes.empty()) {
-      LOG(log_dg_) << "Broadcast next votes for period " << period << ", round " << round - 1;
-      net->getSpecificHandler<network::tarcap::VotesSyncPacketHandler>()->onNewPbftVotesBundle(std::move(next_votes),
-                                                                                               rebroadcast);
+  // (Re)broadcast 2t+1 soft/reward/previous round next votes + all own votes
+  auto broadcastVotes = [this, &net, &gossipVotes](bool rebroadcast) {
+    auto [round, period] = getPbftRoundAndPeriod();
+
+    // Broadcast 2t+1 soft votes
+    gossipVotes(vote_mgr_->getTwoTPlusOneVotedBlockVotes(period, round, TwoTPlusOneVotedBlockType::SoftVotedBlock),
+                "2t+1 soft votes", rebroadcast);
+
+    // Broadcast reward votes - previous round 2t+1 cert votes
+    gossipVotes(vote_mgr_->getRewardVotes(), "2t+1 propose reward votes", rebroadcast);
+
+    // Broadcast previous round 2t+1 next votes
+    if (round > 1) {
+      gossipVotes(vote_mgr_->getAllTwoTPlusOneNextVotes(period, round - 1), "2t+1 next votes", rebroadcast);
     }
-  }
-}
 
-void PbftManager::broadcastRewardVotes(bool rebroadcast) {
-  auto net = network_.lock();
-  if (!net) {
-    return;
-  }
+    // Broadcast own votes
+    auto vote_packet_handler = net->getSpecificHandler<network::tarcap::VotePacketHandler>();
+    // TODO: this could be optimized to use VotesSyncPacketHandler if we drop some of the checks in process function
+    // Send votes by one as votes sync packet must contain votes with the same type, period and round
+    const auto &own_votes = vote_mgr_->getOwnVerifiedVotes();
+    for (const auto &vote : own_votes) {
+      vote_packet_handler->onNewPbftVote(vote, getPbftProposedBlock(vote->getPeriod(), vote->getBlockHash()),
+                                         rebroadcast);
+    }
+    if (!own_votes.empty()) {
+      LOG(log_dg_) << "Broadcast own votes for period " << period << ", round " << round;
+    }
+  };
 
-  auto [round, period] = getPbftRoundAndPeriod();
+  const auto round_elapsed_time = elapsedTimeInMs(current_round_start_datetime_);
+  const auto period_elapsed_time = elapsedTimeInMs(current_period_start_datetime_);
 
-  // Broadcast reward votes - previous round 2t+1 cert votes
-  auto reward_votes = vote_mgr_->getProposeRewardVotes();
-  if (!reward_votes.empty()) {
-    LOG(log_dg_) << "Broadcast propose reward votes for period " << period << ", round " << round;
-    net->getSpecificHandler<network::tarcap::VotesSyncPacketHandler>()->onNewPbftVotesBundle(std::move(reward_votes),
-                                                                                             rebroadcast);
+  if (round_elapsed_time / kMinLambda > kRebroadcastVotesLambdaTime * rebroadcast_votes_counter_) {
+    // Stalled in the same round for kRebroadcastVotesLambdaTime * kMinLambda time -> rebroadcast votes
+    broadcastVotes(true);
+    rebroadcast_votes_counter_++;
+    // If there was a rebroadcast no need to do next broadcast either
+    broadcast_votes_counter_++;
+  } else if (round_elapsed_time / kMinLambda > kBroadcastVotesLambdaTime * broadcast_votes_counter_) {
+    // Stalled in the same round for kBroadcastVotesLambdaTime * kMinLambda time -> broadcast votes
+    broadcastVotes(false);
+    broadcast_votes_counter_++;
+  } else if (period_elapsed_time / kMinLambda > kRebroadcastVotesLambdaTime * rebroadcast_reward_votes_counter_) {
+    // Stalled in the same period for kRebroadcastVotesLambdaTime * kMinLambda time -> rebroadcast reward votes
+    gossipVotes(vote_mgr_->getRewardVotes(), "2t+1 propose reward votes", true);
+    rebroadcast_reward_votes_counter_++;
+    // If there was a rebroadcast no need to do next broadcast either
+    broadcast_reward_votes_counter_++;
+  } else if (period_elapsed_time / kMinLambda > kBroadcastVotesLambdaTime * broadcast_reward_votes_counter_) {
+    // Stalled in the same period for kBroadcastVotesLambdaTime * kMinLambda time -> broadcast reward votes
+    gossipVotes(vote_mgr_->getRewardVotes(), "2t+1 propose reward votes", false);
+    broadcast_reward_votes_counter_++;
   }
 }
PbftManager::testBroadcatVotesFunctionality() { + // Set these variables to force broadcastVotes() send votes + current_round_start_datetime_ = time_point{}; + current_period_start_datetime_ = time_point{}; + + broadcastVotes(); +} + void PbftManager::printVotingSummary() const { const auto [round, period] = getPbftRoundAndPeriod(); Json::Value json_obj; @@ -683,41 +663,25 @@ void PbftManager::printVotingSummary() const { } bool PbftManager::stateOperations_() { - pushSyncedPbftBlocksIntoChain(); - - const auto round_elapsed_time = elapsedTimeInMs(current_round_start_datetime_); - const auto period_elapsed_time = elapsedTimeInMs(current_period_start_datetime_); - - if (round_elapsed_time / LAMBDA_ms_MIN > kRebroadcastVotesLambdaTime * rebroadcast_soft_next_votes_counter_) { - broadcastSoftAndNextVotes(true); - rebroadcast_soft_next_votes_counter_++; - // If there was a rebroadcast no need to do next broadcast either - broadcast_soft_next_votes_counter_++; - } else if (round_elapsed_time / LAMBDA_ms_MIN > kBroadcastVotesLambdaTime * broadcast_soft_next_votes_counter_) { - broadcastSoftAndNextVotes(false); - broadcast_soft_next_votes_counter_++; - } - - // Reward votes need to be broadcast even if we are advancing rounds but unable to advance a period - if (period_elapsed_time / LAMBDA_ms_MIN > kRebroadcastVotesLambdaTime * rebroadcast_reward_votes_counter_) { - broadcastRewardVotes(true); - rebroadcast_reward_votes_counter_++; - // If there was a rebroadcast no need to do next broadcast either - broadcast_reward_votes_counter_++; - } else if (period_elapsed_time / LAMBDA_ms_MIN > kBroadcastVotesLambdaTime * broadcast_reward_votes_counter_) { - broadcastRewardVotes(false); - broadcast_reward_votes_counter_++; - } - auto [round, period] = getPbftRoundAndPeriod(); LOG(log_tr_) << "PBFT current round: " << round << ", period: " << period << ", step " << step_; - // Check if these is already 2t+1 cert votes for some valid block, if so - push it into the chain - if (tryPushCertVotesBlock()) { - return true; + // Process synced blocks + pushSyncedPbftBlocksIntoChain(); + + auto net = network_.lock(); + // Only broadcast votes and try to push cert voted block if node is not syncing + if (net && !net->pbft_syncing()) { + // (Re)broadcast votes if needed + broadcastVotes(); + + // Check if there is 2t+1 cert votes for some valid block, if so - push it into the chain + if (tryPushCertVotesBlock()) { + return true; + } } - // 2t+1 next votes were seen + // Check if there is 2t+1 next votes for some valid block, if so - advance round if (advanceRound()) { return true; } @@ -767,6 +731,9 @@ bool PbftManager::placeVote(const std::shared_ptr &vote, std::string_view gossipNewVote(vote, voted_block); + // Save own verified vote + vote_mgr_->saveOwnVerifiedVote(vote); + LOG(log_nf_) << "Placed " << log_vote_id << " " << vote->getHash() << " for block " << vote->getBlockHash() << ", vote weight " << *vote->getWeight() << ", period " << vote->getPeriod() << ", round " << vote->getRound() << ", step " << vote->getStep(); @@ -926,17 +893,21 @@ void PbftManager::identifyBlock_() { void PbftManager::certifyBlock_() { // The Certifying Step auto [round, period] = getPbftRoundAndPeriod(); - LOG(log_dg_) << "PBFT certifying state in period " << period << ", round " << round; + + if (printCertStepInfo_) { + LOG(log_dg_) << "PBFT certifying state in period " << period << ", round " << round; + printCertStepInfo_ = false; + } const auto elapsed_time_in_round = elapsedTimeInMs(current_round_start_datetime_); - 
go_finish_state_ = elapsed_time_in_round > 4 * LAMBDA_ms - kPollingIntervalMs; + go_finish_state_ = elapsed_time_in_round > 4 * lambda_ - kPollingIntervalMs; if (go_finish_state_) { LOG(log_dg_) << "Step 3 expired, will go to step 4 in period " << period << ", round " << round; return; } // Should not happen, add log here for safety checking - if (elapsed_time_in_round < 2 * LAMBDA_ms) { + if (elapsed_time_in_round < 2 * lambda_) { LOG(log_er_) << "PBFT Reached step 3 too quickly after only " << elapsed_time_in_round.count() << " [ms] in period " << period << ", round " << round; return; @@ -1043,9 +1014,11 @@ void PbftManager::firstFinish_() { void PbftManager::secondFinish_() { // Odd number steps from 5 are in second finish auto [round, period] = getPbftRoundAndPeriod(); - LOG(log_dg_) << "PBFT second finishing state in period " << period << ", round " << round << ", step " << step_; - assert(step_ >= startingStepInRound_); + if (printSecondFinishStepInfo_) { + LOG(log_dg_) << "PBFT second finishing state in period " << period << ", round " << round << ", step " << step_; + printSecondFinishStepInfo_ = false; + } // Lambda function for next voting 2t+1 soft voted block from current round auto next_vote_soft_voted_block = [this, period = period, round = round]() { @@ -1106,7 +1079,7 @@ void PbftManager::secondFinish_() { // Try to next vote 2t+1 next voted null block from previous round next_vote_null_block(); - loop_back_finish_state_ = elapsedTimeInMs(second_finish_step_start_datetime_) > 2 * (LAMBDA_ms - kPollingIntervalMs); + loop_back_finish_state_ = elapsedTimeInMs(second_finish_step_start_datetime_) > 2 * (lambda_ - kPollingIntervalMs); } std::optional, std::vector>>> PbftManager::generatePbftBlock( @@ -1114,7 +1087,7 @@ std::optional, std::vectorgetProposeRewardVotes(); + auto reward_votes = vote_mgr_->getRewardVotes(); if (propose_period > 1) [[likely]] { assert(!reward_votes.empty()); if (reward_votes[0]->getPeriod() != propose_period - 1) { @@ -1138,11 +1111,15 @@ std::optional, std::vector(prev_blk_hash, anchor_hash, order_hash, last_state_root, propose_period, + node_addr_, node_sk_, std::move(reward_votes_hashes)); - auto block = std::make_shared(prev_blk_hash, anchor_hash, order_hash, last_state_root, propose_period, - node_addr_, node_sk_, std::move(reward_votes_hashes)); - - return {std::make_pair(std::move(block), std::move(reward_votes))}; + return {std::make_pair(std::move(block), std::move(reward_votes))}; + } catch (const std::exception &e) { + LOG(log_er_) << "Block for period " << propose_period << " could not be proposed " << e.what(); + return {}; + } } void PbftManager::processProposedBlock(const std::shared_ptr &proposed_block, @@ -1347,20 +1324,9 @@ std::shared_ptr PbftManager::identifyLeaderBlock_(PbftRound round, Pb return empty_leader_block; } -bool PbftManager::validatePbftBlock(const std::shared_ptr &pbft_block) const { - if (!pbft_block) { - LOG(log_er_) << "Unable to validate pbft block - no block provided"; - return false; - } - - // Validates pbft_block's previous block hash against pbft chain - if (!pbft_chain_->checkPbftBlockValidation(pbft_block)) { - return false; - } - - auto const &pbft_block_hash = pbft_block->getBlockHash(); - +bool PbftManager::validatePbftBlockStateRoot(const std::shared_ptr &pbft_block) const { auto period = pbft_block->getPeriod(); + auto const &pbft_block_hash = pbft_block->getBlockHash(); { h256 prev_state_root_hash; if (period > final_chain_->delegation_delay()) { @@ -1377,6 +1343,25 @@ bool 
PbftManager::validatePbftBlock(const std::shared_ptr &pbft_block return false; } } + return true; +} + +bool PbftManager::validatePbftBlock(const std::shared_ptr &pbft_block) const { + if (!pbft_block) { + LOG(log_er_) << "Unable to validate pbft block - no block provided"; + return false; + } + + // Validates pbft_block's previous block hash against pbft chain + if (!pbft_chain_->checkPbftBlockValidation(pbft_block)) { + return false; + } + + auto const &pbft_block_hash = pbft_block->getBlockHash(); + + if (!validatePbftBlockStateRoot(pbft_block)) { + return false; + } // Validates reward votes if (!vote_mgr_->checkRewardVotes(pbft_block, false).first) { @@ -1505,7 +1490,6 @@ void PbftManager::reorderTransactions(SharedTransactions &transactions) { // While iterating over transactions, account_nonce will keep the last nonce for the account std::unordered_map account_nonce; - std::unordered_map>> account_nonce_transactions; // Find accounts that need reordering and place in account_reverse_order set for (uint32_t i = 0; i < transactions.size(); i++) { @@ -1526,6 +1510,7 @@ void PbftManager::reorderTransactions(SharedTransactions &transactions) { // If account_reverse_order size is 0, there is no need to reorder transactions if (account_reverse_order.size() > 0) { + std::unordered_map>> account_nonce_transactions; // Keep the order for all transactions that do not need reordering for (uint32_t i = 0; i < transactions.size(); i++) { const auto &t = transactions[i]; @@ -1601,8 +1586,8 @@ bool PbftManager::pushPbftBlock_(PeriodData &&period_data, std::vectorsavePeriodData(period_data, batch); // Replace current reward votes - vote_mgr_->resetRewardVotesInfo(cert_votes[0]->getPeriod(), cert_votes[0]->getRound(), cert_votes[0]->getBlockHash()); - db_->replaceRewardVotes(cert_votes, batch); + vote_mgr_->resetRewardVotes(cert_votes[0]->getPeriod(), cert_votes[0]->getRound(), cert_votes[0]->getStep(), + cert_votes[0]->getBlockHash(), batch); // pass pbft with dag blocks and transactions to adjust difficulty if (period_data.pbft_blk->getPivotDagBlockHash() != kNullBlockHash) { @@ -1681,6 +1666,10 @@ std::optional>>> PbftMan return std::nullopt; } + if (!validatePbftBlockStateRoot(period_data.pbft_blk)) { + return std::nullopt; + } + // Check reward votes auto reward_votes = vote_mgr_->checkRewardVotes(period_data.pbft_blk, true); if (!reward_votes.first) { @@ -1851,10 +1840,9 @@ void PbftManager::periodDataQueuePush(PeriodData &&period_data, dev::p2p::NodeID size_t PbftManager::periodDataQueueSize() const { return sync_queue_.size(); } bool PbftManager::checkBlockWeight(const std::vector &dag_blocks) const { - u256 total_weight = 0; - for (const auto &dag_block : dag_blocks) { - total_weight += dag_block.getGasEstimation(); - } + const u256 total_weight = + std::accumulate(dag_blocks.begin(), dag_blocks.end(), u256(0), + [](u256 value, const auto &dag_block) { return value + dag_block.getGasEstimation(); }); if (total_weight > config_.gas_limit) { return false; } diff --git a/libraries/core_libs/consensus/src/final_chain/rewards_stats.cpp b/libraries/core_libs/consensus/src/rewards/block_stats.cpp similarity index 50% rename from libraries/core_libs/consensus/src/final_chain/rewards_stats.cpp rename to libraries/core_libs/consensus/src/rewards/block_stats.cpp index f14bbb0d11..2e6d1bfbeb 100644 --- a/libraries/core_libs/consensus/src/final_chain/rewards_stats.cpp +++ b/libraries/core_libs/consensus/src/rewards/block_stats.cpp @@ -1,36 +1,44 @@ -#include "final_chain/rewards_stats.hpp" +#include 
"rewards/block_stats.hpp" #include -namespace taraxa { +#include "pbft/pbft_block.hpp" -bool RewardsStats::addTransaction(const trx_hash_t& tx_hash, const addr_t& validator) { - auto found_tx = txs_validators_.find(tx_hash); +namespace taraxa::rewards { + +BlockStats::BlockStats(const PeriodData& block, uint64_t dpos_vote_count, uint32_t committee_size) + : block_author_(block.pbft_blk->getBeneficiary()), + max_votes_weight_(std::min(committee_size, dpos_vote_count)) { + processStats(block); +} + +bool BlockStats::addTransaction(const trx_hash_t& tx_hash, const addr_t& validator) { + auto found_tx = validator_by_tx_hash_.find(tx_hash); // Already processed tx - if (found_tx != txs_validators_.end()) { + if (found_tx != validator_by_tx_hash_.end()) { return false; } // New tx - txs_validators_[tx_hash] = validator; + validator_by_tx_hash_[tx_hash] = validator; return true; } -std::optional RewardsStats::getTransactionValidator(const trx_hash_t& tx_hash) { - auto found_tx = txs_validators_.find(tx_hash); - if (found_tx == txs_validators_.end()) { +std::optional BlockStats::getTransactionValidator(const trx_hash_t& tx_hash) { + auto found_tx = validator_by_tx_hash_.find(tx_hash); + if (found_tx == validator_by_tx_hash_.end()) { return {}; } return {found_tx->second}; } -bool RewardsStats::addVote(const std::shared_ptr& vote) { +bool BlockStats::addVote(const std::shared_ptr& vote) { // Set valid cert vote to validator auto& validator_stats = validators_stats_[vote->getVoterAddr()]; - // assert(validator_stats.vote_weight_ == 0); + assert(validator_stats.vote_weight_ == 0); assert(vote->getWeight()); if (validator_stats.vote_weight_) { @@ -52,12 +60,12 @@ std::set toTrxHashesSet(const SharedTransactions& transactions) { return block_transactions_hashes_; } -void RewardsStats::initStats(const PeriodData& sync_blk, uint64_t dpos_vote_count, uint32_t committee_size) { - txs_validators_.reserve(sync_blk.transactions.size()); - validators_stats_.reserve(std::max(sync_blk.dag_blocks.size(), sync_blk.previous_block_cert_votes.size())); - auto block_transactions_hashes_ = toTrxHashesSet(sync_blk.transactions); +void BlockStats::processStats(const PeriodData& block) { + validator_by_tx_hash_.reserve(block.transactions.size()); + validators_stats_.reserve(std::max(block.dag_blocks.size(), block.previous_block_cert_votes.size())); + auto block_transactions_hashes_ = toTrxHashesSet(block.transactions); - for (const auto& dag_block : sync_blk.dag_blocks) { + for (const auto& dag_block : block.dag_blocks) { const addr_t& dag_block_author = dag_block.getSender(); bool has_unique_transactions = false; for (const auto& tx_hash : dag_block.getTrxs()) { @@ -78,34 +86,24 @@ void RewardsStats::initStats(const PeriodData& sync_blk, uint64_t dpos_vote_coun } } // total_unique_txs_count_ should be always equal to transactions count in block - assert(txs_validators_.size() == sync_blk.transactions.size()); + assert(validator_by_tx_hash_.size() == block.transactions.size()); - max_votes_weight_ = std::min(committee_size, dpos_vote_count); - for (const auto& vote : sync_blk.previous_block_cert_votes) { + for (const auto& vote : block.previous_block_cert_votes) { addVote(vote); } -} - -std::vector RewardsStats::processStats(const PeriodData& block, uint64_t dpos_vote_count, - uint32_t committee_size) { - initStats(block, dpos_vote_count, committee_size); - - // Dag blocks validators that included transactions to be executed as first in their blocks - std::vector txs_validators; - 
txs_validators.reserve(block.transactions.size()); + txs_validators_.reserve(block.transactions.size()); for (const auto& tx : block.transactions) { // Non-executed trxs auto tx_validator = getTransactionValidator(tx->getHash()); assert(tx_validator.has_value()); - txs_validators.push_back(*tx_validator); + txs_validators_.push_back(*tx_validator); } - - return txs_validators; } -RLP_FIELDS_DEFINE(RewardsStats::ValidatorStats, dag_blocks_count_, vote_weight_) -RLP_FIELDS_DEFINE(RewardsStats, validators_stats_, total_dag_blocks_count_, total_votes_weight_, max_votes_weight_) +RLP_FIELDS_DEFINE(BlockStats::ValidatorStats, dag_blocks_count_, vote_weight_) +RLP_FIELDS_DEFINE(BlockStats, block_author_, validators_stats_, txs_validators_, total_dag_blocks_count_, + total_votes_weight_, max_votes_weight_) -} // namespace taraxa \ No newline at end of file +} // namespace taraxa::rewards \ No newline at end of file diff --git a/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp b/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp new file mode 100644 index 0000000000..1d4a03f5e5 --- /dev/null +++ b/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp @@ -0,0 +1,75 @@ +#include "rewards/rewards_stats.hpp" + +#include "storage/storage.hpp" + +namespace taraxa::rewards { +Stats::Stats(uint32_t committee_size, const Hardforks::RewardsDistributionMap& rdm, std::shared_ptr db, + std::function&& dpos_eligible_total_vote_count) + : kCommitteeSize(committee_size), + kRewardsDistributionFrequency(rdm), + db_(std::move(db)), + dpos_eligible_total_vote_count_(dpos_eligible_total_vote_count) { + loadFromDb(); +} + +void Stats::loadFromDb() { + auto i = db_->getColumnIterator(DB::Columns::block_rewards_stats); + for (i->SeekToFirst(); i->Valid(); i->Next()) { + blocks_stats_.push_back(util::rlp_dec(dev::RLP(i->value().ToString()))); + } +} + +void Stats::saveBlockStats(uint64_t period, const BlockStats& stats) { + dev::RLPStream encoding; + stats.rlp(encoding); + + db_->insert(DB::Columns::block_rewards_stats, period, encoding.out()); +} + +uint32_t Stats::getCurrentDistributionFrequency(uint64_t current_block) const { + auto itr = kRewardsDistributionFrequency.upper_bound(current_block); + if (kRewardsDistributionFrequency.empty() || itr == kRewardsDistributionFrequency.begin()) { + return 1; + } + return (--itr)->second; +} + +void Stats::clear() { + blocks_stats_.clear(); + db_->deleteColumnData(DB::Columns::block_rewards_stats); +} + +BlockStats Stats::getBlockStats(const PeriodData& blk) { + uint64_t dpos_vote_count = kCommitteeSize; + + // Block zero + if (!blk.previous_block_cert_votes.empty()) [[likely]] { + dpos_vote_count = dpos_eligible_total_vote_count_(blk.previous_block_cert_votes[0]->getPeriod() - 1); + } + + return BlockStats{blk, dpos_vote_count, kCommitteeSize}; +} + +std::vector Stats::processStats(const PeriodData& current_blk) { + const auto current_period = current_blk.pbft_blk->getPeriod(); + const auto frequency = getCurrentDistributionFrequency(current_period); + + // Distribute rewards every block + if (frequency == 1) { + return {getBlockStats(current_blk)}; + } + + blocks_stats_.push_back(getBlockStats(current_blk)); + // Blocks between distribution. 
Process and save for future processing + if (current_period % frequency != 0) { + // Save to db, so in case of restart data could be just loaded for the period + saveBlockStats(current_period, *blocks_stats_.rbegin()); + return {}; + } + + std::vector res(std::move(blocks_stats_)); + clear(); + return res; +} + +} // namespace taraxa::rewards \ No newline at end of file diff --git a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp index 725978795b..0e1c9ce7ac 100644 --- a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp +++ b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp @@ -380,7 +380,7 @@ std::optional TransactionManager::getBlockTransactions(DagBl transactions.emplace_back(std::move(trx)); } } else { - LOG(log_er_) << "Block " << blk.getHash() << " has missing transaction " << finalizedTransactions.second; + LOG(log_nf_) << "Block " << blk.getHash() << " has missing transaction " << finalizedTransactions.second; return std::nullopt; } diff --git a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp index 74b28f5058..8c37742755 100644 --- a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp +++ b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp @@ -31,25 +31,48 @@ VoteManager::VoteManager(const addr_t& node_addr, const PbftConfig& pbft_config, auto db_votes = db_->getAllTwoTPlusOneVotes(); - auto loadVotesFromDb = [this](const std::vector>& votes) { + auto addVerifiedVotes = [this](const std::vector>& votes, bool set_reward_votes_info = false) { + bool rewards_info_already_set = false; for (const auto& vote : votes) { // Check if votes are unique per round, step & voter if (!isUniqueVote(vote).first) { continue; } + if (set_reward_votes_info && vote->getType() == PbftVoteTypes::cert_vote) { + if (!rewards_info_already_set) { + rewards_info_already_set = true; + reward_votes_block_hash_ = vote->getBlockHash(); + reward_votes_period_ = vote->getPeriod(); + reward_votes_round_ = vote->getRound(); + } else { + assert(reward_votes_block_hash_ == vote->getBlockHash()); + assert(reward_votes_period_ == vote->getPeriod()); + assert(reward_votes_round_ == vote->getRound()); + } + } + addVerifiedVote(vote); LOG(log_dg_) << "Vote " << vote->getHash() << " loaded from db to memory"; } }; - loadVotesFromDb(db_->getAllTwoTPlusOneVotes()); - loadVotesFromDb(db_->getOwnVerifiedVotes()); + // Load 2t+1 vote blocks votes + addVerifiedVotes(db_->getAllTwoTPlusOneVotes(), true); - if (const auto reward_votes = db_->getRewardVotes(); !reward_votes.empty()) { - loadVotesFromDb(reward_votes); - resetRewardVotesInfo(reward_votes[0]->getPeriod(), reward_votes[0]->getRound(), reward_votes[0]->getBlockHash()); + // Load own votes + const auto own_votes = db_->getOwnVerifiedVotes(); + for (const auto& own_vote : own_votes) { + own_verified_votes_.emplace_back(own_vote); } + addVerifiedVotes(own_votes); + + // Load reward votes + const auto reward_votes = db_->getRewardVotes(); + for (const auto& reward_vote : reward_votes) { + extra_reward_votes_.emplace_back(reward_vote->getHash()); + } + addVerifiedVotes(reward_votes); } void VoteManager::setNetwork(std::weak_ptr network) { network_ = std::move(network); } @@ -81,9 +104,9 @@ uint64_t VoteManager::getVerifiedVotesSize() const { for (auto const& period : verified_votes_) { for (auto const& round : period.second) { for (auto 
const& step : round.second.step_votes) { - for (auto const& voted_value : step.second.votes) { - size += voted_value.second.second.size(); - } + size += std::accumulate( + step.second.votes.begin(), step.second.votes.end(), 0, + [](uint64_t value, const auto& voted_value) { return value + voted_value.second.second.size(); }); } } } @@ -142,6 +165,22 @@ void VoteManager::setCurrentPbftPeriodAndRound(PbftPeriod pbft_period, PbftRound } } +PbftStep VoteManager::getNetworkTplusOneNextVotingStep(PbftPeriod period, PbftRound round) const { + std::shared_lock lock(verified_votes_access_); + + const auto found_period_it = verified_votes_.find(period); + if (found_period_it == verified_votes_.end()) { + return 0; + } + + const auto found_round_it = found_period_it->second.find(round); + if (found_round_it == found_period_it->second.end()) { + return 0; + } + + return found_round_it->second.network_t_plus_one_step; +} + bool VoteManager::addVerifiedVote(const std::shared_ptr& vote) { assert(vote->getWeight().has_value()); const auto hash = vote->getHash(); @@ -211,16 +250,32 @@ bool VoteManager::addVerifiedVote(const std::shared_ptr& vote) { LOG(log_nf_) << "Added verified vote: " << hash; LOG(log_dg_) << "Added verified vote: " << *vote; - // Save in db only those reward votes that have the same round as round during which we pushed the block into chain - if (is_valid_potential_reward_vote && reward_votes_round_ == vote->getRound()) { - db_->saveRewardVote(vote); + if (is_valid_potential_reward_vote) { + extra_reward_votes_.emplace_back(vote->getHash()); + db_->saveExtraRewardVote(vote); } const auto total_weight = (found_voted_value_it->second.first += weight); - // Not enough votes - do not set 2t+1 voted block for period,round and step + // Unable to get 2t+1 const auto two_t_plus_one = getPbftTwoTPlusOne(vote->getPeriod() - 1, vote->getType()); - if (total_weight < two_t_plus_one) { + if (!two_t_plus_one.has_value()) [[unlikely]] { + LOG(log_er_) << "Cannot set(or not) 2t+1 voted block as 2t+1 threshold is unavailable, vote " << vote->getHash(); + return true; + } + + // Calculate t+1 + const auto t_plus_one = ((*two_t_plus_one - 1) / 2) + 1; + // Set network_t_plus_one_step - used for triggering exponential backoff + if (vote->getType() == PbftVoteTypes::next_vote && total_weight >= t_plus_one && + vote->getStep() > found_round_it->second.network_t_plus_one_step) { + found_round_it->second.network_t_plus_one_step = vote->getStep(); + LOG(log_nf_) << "Set t+1 next voted block " << vote->getHash() << " for period " << vote->getPeriod() + << ", round " << vote->getRound() << ", step " << vote->getStep(); + } + + // Not enough votes - do not set 2t+1 voted block for period,round and step + if (total_weight < *two_t_plus_one) { return true; } @@ -249,7 +304,9 @@ bool VoteManager::addVerifiedVote(const std::shared_ptr& vote) { {two_plus_one_voted_block_type, std::make_pair(vote->getBlockHash(), vote->getStep())}); // Save only current pbft period & round 2t+1 votes bundles into db - if (vote->getPeriod() == current_pbft_period_ && vote->getRound() == current_pbft_round_) { + // Cert votes are saved once the pbft block is pushed in the chain + if (vote->getType() != PbftVoteTypes::cert_vote && vote->getPeriod() == current_pbft_period_ && + vote->getRound() == current_pbft_round_) { std::vector> votes; votes.reserve(found_voted_value_it->second.second.size()); for (const auto& tmp_vote : found_voted_value_it->second.second) { @@ -511,22 +568,70 @@ PbftPeriod 
VoteManager::getRewardVotesPbftBlockPeriod() { return reward_votes_period_; }

-void VoteManager::resetRewardVotesInfo(PbftPeriod period, PbftRound round, const blk_hash_t& block_hash) {
+void VoteManager::resetRewardVotes(PbftPeriod period, PbftRound round, PbftStep step, const blk_hash_t& block_hash,
+                                   DbStorage::Batch& batch) {
+  // Save 2t+1 cert votes to database, remove old reward votes
  {
    std::scoped_lock lock(reward_votes_info_mutex_);
-    reward_votes_block_hash_ = block_hash;
    reward_votes_period_ = period;
    reward_votes_round_ = round;
  }

+  std::scoped_lock lock(verified_votes_access_);
+  auto found_period_it = verified_votes_.find(period);
+  if (found_period_it == verified_votes_.end()) {
+    LOG(log_er_) << "resetRewardVotes missing period";
+    assert(false);
+    return;
+  }
+  auto found_round_it = found_period_it->second.find(round);
+  if (found_round_it == found_period_it->second.end()) {
+    LOG(log_er_) << "resetRewardVotes missing round " << round;
+    assert(false);
+    return;
+  }
+  auto found_step_it = found_round_it->second.step_votes.find(step);
+  if (found_step_it == found_round_it->second.step_votes.end()) {
+    LOG(log_er_) << "resetRewardVotes missing step " << step;
+    assert(false);
+    return;
+  }
+  auto found_two_t_plus_one_voted_block =
+      found_round_it->second.two_t_plus_one_voted_blocks_.find(TwoTPlusOneVotedBlockType::CertVotedBlock);
+  if (found_two_t_plus_one_voted_block == found_round_it->second.two_t_plus_one_voted_blocks_.end()) {
+    LOG(log_er_) << "resetRewardVotes missing cert voted block";
+    assert(false);
+    return;
+  }
+  if (found_two_t_plus_one_voted_block->second.first != block_hash) {
+    LOG(log_er_) << "resetRewardVotes incorrect block " << found_two_t_plus_one_voted_block->second.first
+                 << " expected " << block_hash;
+    assert(false);
+    return;
+  }
+  auto found_voted_value_it = found_step_it->second.votes.find(block_hash);
+  if (found_voted_value_it == found_step_it->second.votes.end()) {
+    LOG(log_er_) << "resetRewardVotes missing vote block " << block_hash;
+    assert(false);
+    return;
+  }
+  std::vector<std::shared_ptr<Vote>> votes;
+  votes.reserve(found_voted_value_it->second.second.size());
+  for (const auto& tmp_vote : found_voted_value_it->second.second) {
+    votes.push_back(tmp_vote.second);
+  }
+
+  db_->replaceTwoTPlusOneVotesToBatch(TwoTPlusOneVotedBlockType::CertVotedBlock, votes, batch);
+  db_->removeExtraRewardVotes(extra_reward_votes_, batch);
+  extra_reward_votes_.clear();
+
  LOG(log_dg_) << "Reward votes info reset to: block_hash: " << block_hash << ", period: " << period
               << ", round: " << round;
}

bool VoteManager::isValidRewardVote(const std::shared_ptr<Vote>& vote) const {
  std::shared_lock lock(reward_votes_info_mutex_);
-
  if (vote->getType() != PbftVoteTypes::cert_vote) {
    LOG(log_tr_) << "Invalid reward vote: type " << static_cast(vote->getType())
                 << " is different from cert type";
@@ -607,19 +712,27 @@ std::pair<bool, std::vector<std::shared_ptr<Vote>>> VoteManager::checkRewardVote
    }
  };

-  std::shared_lock reward_votes_info_lock(reward_votes_info_mutex_);
+  blk_hash_t reward_votes_block_hash;
+  PbftPeriod reward_votes_period;
+  PbftRound reward_votes_round;
+  {
+    std::shared_lock reward_votes_info_lock(reward_votes_info_mutex_);
+    reward_votes_block_hash = reward_votes_block_hash_;
+    reward_votes_period = reward_votes_period_;
+    reward_votes_round = reward_votes_round_;
+  }

  std::shared_lock verified_votes_lock(verified_votes_access_);
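
Copying reward_votes_block_hash_/reward_votes_period_/reward_votes_round_ into locals here lets the rest of checkRewardVotes() run while holding only verified_votes_access_, instead of keeping reward_votes_info_mutex_ locked across the whole call. A minimal sketch of that snapshot-under-lock pattern, with types reduced to placeholders:

    #include <cstdint>
    #include <mutex>
    #include <shared_mutex>
    #include <tuple>

    struct RewardVotesInfo {
      mutable std::shared_mutex mutex;
      uint64_t period = 0;  // placeholder for PbftPeriod
      uint32_t round = 0;   // placeholder for PbftRound

      // Take a consistent copy under the info mutex, then release it immediately,
      // so the caller never holds this mutex together with the votes mutex.
      std::tuple<uint64_t, uint32_t> snapshot() const {
        std::shared_lock lock(mutex);
        return {period, round};
      }
    };
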
-  const auto found_period_it = verified_votes_.find(reward_votes_period_);
+  const auto found_period_it = verified_votes_.find(reward_votes_period);
  if (found_period_it == verified_votes_.end()) {
-    LOG(log_er_) << "No reward votes found for period " << reward_votes_period_;
+    LOG(log_er_) << "No reward votes found for period " << reward_votes_period;
    assert(false);
    return {false, {}};
  }

-  const auto found_round_it = found_period_it->second.find(reward_votes_round_);
+  const auto found_round_it = found_period_it->second.find(reward_votes_round);
  if (found_round_it == found_period_it->second.end()) {
-    LOG(log_er_) << "No reward votes found for round " << reward_votes_round_;
+    LOG(log_er_) << "No reward votes found for round " << reward_votes_round;
    assert(false);
    return {false, {}};
  }
@@ -627,7 +740,7 @@ std::pair<bool, std::vector<std::shared_ptr<Vote>>> VoteManager::checkRewardVote
  const auto reward_votes_hashes = pbft_block->getRewardVotes();

  // Most of the time we should get the reward votes based on reward_votes_period_ and reward_votes_round_
-  auto reward_votes = getRewardVotes(found_round_it, reward_votes_hashes, reward_votes_block_hash_, copy_votes);
+  auto reward_votes = getRewardVotes(found_round_it, reward_votes_hashes, reward_votes_block_hash, copy_votes);
  if (reward_votes.first) [[likely]] {
    return {true, std::move(reward_votes.second)};
  }

@@ -636,13 +749,13 @@
  // and when they included the reward votes in new block, these votes have different round than what is saved in
  // reward_votes_round_ -> therefore we have to iterate over all rounds and find the correct round
  for (auto round_it = found_period_it->second.begin(); round_it != found_period_it->second.end(); round_it++) {
-    const auto tmp_reward_votes = getRewardVotes(round_it, reward_votes_hashes, reward_votes_block_hash_, copy_votes);
+    const auto tmp_reward_votes = getRewardVotes(round_it, reward_votes_hashes, reward_votes_block_hash, copy_votes);
    if (!tmp_reward_votes.first) {
      LOG(log_dg_) << "No (or not enough) reward votes found for block " << pbft_block->getBlockHash()
                   << ", period: " << pbft_block->getPeriod() << ", prev. block hash: " << pbft_block->getPrevBlockHash()
-                   << ", reward_votes_period_: " << reward_votes_period_ << ", reward_votes_round_: " << round_it->first
-                   << ", reward_votes_block_hash_: " << reward_votes_block_hash_;
+                   << ", reward_votes_period: " << reward_votes_period << ", reward_votes_round: " << round_it->first
+                   << ", reward_votes_block_hash: " << reward_votes_block_hash;
      continue;
    }
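
checkRewardVotes() first looks in the round recorded when the block was pushed into the chain, then falls back to scanning every round of the period, because other nodes may have cert-voted the same block in a different round. The lookup-then-scan shape, reduced to a sketch over a generic map of rounds (all types are placeholders, not the real vote structures):

    #include <cstddef>
    #include <cstdint>
    #include <map>
    #include <optional>
    #include <vector>

    using Round = uint32_t;
    using VoteHash = uint64_t;  // placeholder for the real hash type

    // Try the expected round first; if not all wanted votes are there, scan the rest.
    std::optional<std::vector<VoteHash>> findVotes(const std::map<Round, std::vector<VoteHash>>& rounds,
                                                   Round expected_round, std::size_t wanted_count) {
      if (auto it = rounds.find(expected_round); it != rounds.end() && it->second.size() >= wanted_count) {
        return it->second;
      }
      for (const auto& round_votes : rounds) {
        if (round_votes.second.size() >= wanted_count) {
          return round_votes.second;  // same block cert-voted in a different round
        }
      }
      return std::nullopt;
    }
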
block hash: " << pbft_block->getPrevBlockHash() - << ", reward_votes_period_: " << reward_votes_period_ << ", reward_votes_round_: " << reward_votes_round_ - << ", reward_votes_block_hash_: " << reward_votes_block_hash_; + << ", reward_votes_period: " << reward_votes_period << ", reward_votes_round_: " << reward_votes_round + << ", reward_votes_block_hash: " << reward_votes_block_hash; return {false, {}}; } -std::vector> VoteManager::getProposeRewardVotes() { - std::shared_lock lock(reward_votes_info_mutex_); - const auto reward_votes = getTwoTPlusOneVotedBlockVotes(reward_votes_period_, reward_votes_round_, - TwoTPlusOneVotedBlockType::CertVotedBlock); +std::vector> VoteManager::getRewardVotes() { + blk_hash_t reward_votes_block_hash; + PbftRound reward_votes_period; + PbftRound reward_votes_round; + { + std::shared_lock reward_votes_info_lock(reward_votes_info_mutex_); + reward_votes_block_hash = reward_votes_block_hash_; + reward_votes_period = reward_votes_period_; + reward_votes_round = reward_votes_round_; + } + std::shared_lock lock(verified_votes_access_); + auto reward_votes = + getTwoTPlusOneVotedBlockVotes(reward_votes_period, reward_votes_round, TwoTPlusOneVotedBlockType::CertVotedBlock); - if (!reward_votes.empty() && reward_votes[0]->getBlockHash() != reward_votes_block_hash_) { + if (!reward_votes.empty() && reward_votes[0]->getBlockHash() != reward_votes_block_hash) { // This should never happen - LOG(log_er_) << "Proposal reward votes block hash mismatch. reward_votes_block_hash_ " << reward_votes_block_hash_ + LOG(log_er_) << "Proposal reward votes block hash mismatch. reward_votes_block_hash " << reward_votes_block_hash << ", reward_votes[0]->getBlockHash() " << reward_votes[0]->getBlockHash(); assert(false); return {}; @@ -672,6 +794,18 @@ std::vector> VoteManager::getProposeRewardVotes() { return reward_votes; } +void VoteManager::saveOwnVerifiedVote(const std::shared_ptr& vote) { + own_verified_votes_.push_back(vote); + db_->saveOwnVerifiedVote(vote); +} + +std::vector> VoteManager::getOwnVerifiedVotes() { return own_verified_votes_; } + +void VoteManager::clearOwnVerifiedVotes(DbStorage::Batch& write_batch) { + db_->clearOwnVerifiedVotes(write_batch, own_verified_votes_); + own_verified_votes_.clear(); +} + uint64_t VoteManager::getPbftSortitionThreshold(uint64_t total_dpos_votes_count, PbftVoteTypes vote_type) const { switch (vote_type) { case PbftVoteTypes::propose_vote: @@ -871,11 +1005,9 @@ std::optional VoteManager::getTwoTPlusOneVotedBlock(PbftPeriod perio return two_t_plus_one_voted_block_it->second.first; } -std::vector> VoteManager::getTwoTPlusOneVotedBlockVotes( - PbftPeriod period, PbftRound round, TwoTPlusOneVotedBlockType type, - const std::shared_ptr& peer_filter) const { +std::vector> VoteManager::getTwoTPlusOneVotedBlockVotes(PbftPeriod period, PbftRound round, + TwoTPlusOneVotedBlockType type) const { std::shared_lock lock(verified_votes_access_); - const auto found_period_it = verified_votes_.find(period); if (found_period_it == verified_votes_.end()) { return {}; @@ -909,23 +1041,17 @@ std::vector> VoteManager::getTwoTPlusOneVotedBlockVotes( std::vector> votes; votes.reserve(found_verified_votes_it->second.second.size()); for (const auto& vote : found_verified_votes_it->second.second) { - if (peer_filter && peer_filter->isVoteKnown(vote.first)) { - continue; - } - votes.push_back(vote.second); } return votes; } -std::vector> VoteManager::getAllTwoTPlusOneNextVotes( - PbftPeriod period, PbftRound round, const std::shared_ptr& peer_filter) const { - 
auto next_votes = - getTwoTPlusOneVotedBlockVotes(period, round, TwoTPlusOneVotedBlockType::NextVotedBlock, peer_filter); +std::vector> VoteManager::getAllTwoTPlusOneNextVotes(PbftPeriod period, PbftRound round) const { + auto next_votes = getTwoTPlusOneVotedBlockVotes(period, round, TwoTPlusOneVotedBlockType::NextVotedBlock); auto null_block_next_vote = - getTwoTPlusOneVotedBlockVotes(period, round, TwoTPlusOneVotedBlockType::NextVotedNullBlock, peer_filter); + getTwoTPlusOneVotedBlockVotes(period, round, TwoTPlusOneVotedBlockType::NextVotedNullBlock); if (!null_block_next_vote.empty()) { next_votes.reserve(next_votes.size() + null_block_next_vote.size()); next_votes.insert(next_votes.end(), std::make_move_iterator(null_block_next_vote.begin()), diff --git a/libraries/core_libs/network/graphql/src/query.cpp b/libraries/core_libs/network/graphql/src/query.cpp index a71874823b..7606de867b 100644 --- a/libraries/core_libs/network/graphql/src/query.cpp +++ b/libraries/core_libs/network/graphql/src/query.cpp @@ -1,5 +1,7 @@ #include "graphql/query.hpp" +#include + #include "graphql/account.hpp" #include "graphql/block.hpp" #include "graphql/log.hpp" diff --git a/libraries/core_libs/network/graphql/src/types/dag_block.cpp b/libraries/core_libs/network/graphql/src/types/dag_block.cpp index e093a3acc8..7124be5fd2 100644 --- a/libraries/core_libs/network/graphql/src/types/dag_block.cpp +++ b/libraries/core_libs/network/graphql/src/types/dag_block.cpp @@ -1,5 +1,7 @@ #include "graphql/types/dag_block.hpp" +#include + #include "graphql/account.hpp" #include "graphql/transaction.hpp" diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/common/ext_votes_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/common/ext_votes_packet_handler.hpp index 7f140fc72d..0ba977ef2c 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/common/ext_votes_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/common/ext_votes_packet_handler.hpp @@ -63,14 +63,6 @@ class ExtVotesPacketHandler : public PacketHandler { const std::shared_ptr& peer, bool validate_max_round_step); - /** - * @brief Common validation for all types of votes - * - * @param vote to be validated - * @return vote validation passed, otherwise - */ - std::pair validateVote(const std::shared_ptr& vote) const; - /** * @brief Validates provided vote if voted value == provided block * diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/get_pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/get_pbft_sync_packet_handler.hpp index c503066bbb..bccf58638f 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/get_pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/get_pbft_sync_packet_handler.hpp @@ -5,6 +5,7 @@ namespace taraxa { class PbftChain; class DbStorage; +class VoteManager; } // namespace taraxa namespace taraxa::network::tarcap { @@ -16,9 +17,10 @@ class GetPbftSyncPacketHandler final : public PacketHandler { GetPbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, - std::shared_ptr db, const addr_t& node_addr); + std::shared_ptr vote_mgr, std::shared_ptr db, + const addr_t& node_addr); - void sendPbftBlocks(dev::p2p::NodeID const& peer_id, PbftPeriod 
from_period, size_t blocks_to_transfer,
+  void sendPbftBlocks(const std::shared_ptr<TaraxaPeer>& peer, PbftPeriod from_period, size_t blocks_to_transfer,
                      bool pbft_chain_synced);

  // Packet type that is processed by this handler
@@ -30,6 +32,7 @@ class GetPbftSyncPacketHandler final : public PacketHandler {
  std::shared_ptr<PbftSyncingState> pbft_syncing_state_;

  std::shared_ptr<PbftChain> pbft_chain_;
+ std::shared_ptr<VoteManager> vote_mgr_;
  std::shared_ptr<DbStorage> db_;
};

diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/transaction_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/transaction_packet_handler.hpp
index d205ec2470..cd71341cf5 100644
--- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/transaction_packet_handler.hpp
+++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/transaction_packet_handler.hpp
@@ -27,8 +27,7 @@ class TransactionPacketHandler final : public PacketHandler {
   * @param transactions serialized transactions
   *
   */
-  void sendTransactions(std::shared_ptr<TaraxaPeer> const& peer,
-                        std::vector<std::shared_ptr<Transaction>>&& transactions);
+  void sendTransactions(std::shared_ptr<TaraxaPeer> peer, std::vector<std::shared_ptr<Transaction>>&& transactions);

  /**
   * @brief Sends batch of transactions to all connected peers

diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/vote_packet_handler.hpp
index 9035d7945a..13c30d346a 100644
--- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/vote_packet_handler.hpp
+++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/vote_packet_handler.hpp
@@ -16,8 +16,10 @@ class VotePacketHandler final : public ExtVotesPacketHandler {
   *
   * @param vote Votes to send
   * @param block block to send - nullptr means no block
+  * @param rebroadcast - send even if the vote is already known to the peer
   */
-  void onNewPbftVote(const std::shared_ptr<Vote>& vote, const std::shared_ptr<PbftBlock>& block);
+  void onNewPbftVote(const std::shared_ptr<Vote>& vote, const std::shared_ptr<PbftBlock>& block,
+                     bool rebroadcast = false);
  void sendPbftVote(const std::shared_ptr<TaraxaPeer>& peer, const std::shared_ptr<Vote>& vote,
                    const std::shared_ptr<PbftBlock>& block);

diff --git a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp
index faad6cc7ec..b651d85b5c 100644
--- a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp
+++ b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp
@@ -88,7 +88,7 @@ class TaraxaCapability : public dev::p2p::CapabilityFace {
  // END METHODS USED IN TESTS ONLY

 protected:
-  virtual void initPeriodicEvents(const std::shared_ptr<PbftManager> &pbft_mgr, const std::shared_ptr<DbStorage> &db,
+  virtual void initPeriodicEvents(const std::shared_ptr<PbftManager> &pbft_mgr, std::shared_ptr<TransactionManager> trx_mgr,
                                  std::shared_ptr packets_stats);

  virtual void registerPacketHandlers(const h256 &genesis_hash,

diff --git a/libraries/core_libs/network/include/network/tarcap/taraxa_peer.hpp b/libraries/core_libs/network/include/network/tarcap/taraxa_peer.hpp
index 8defcb5886..34df483adc 100644
--- a/libraries/core_libs/network/include/network/tarcap/taraxa_peer.hpp
+++ b/libraries/core_libs/network/include/network/tarcap/taraxa_peer.hpp
@@ -127,7 +127,7 @@ class TaraxaPeer : public boost::noncopyable {
  const uint64_t kMaxSuspiciousPacketPerMinute = 1000;

  // Performance-intensive dag syncing is only allowed to be requested once each kDagSyncingLimit seconds
-  const 
uint64_t kDagSyncingLimit = 60; // Packets stats for packets sent by *this TaraxaPeer PacketsStats sent_packets_stats_; diff --git a/libraries/core_libs/network/include/network/ws_server.hpp b/libraries/core_libs/network/include/network/ws_server.hpp index 7dd18406cc..0fcd33dbb9 100644 --- a/libraries/core_libs/network/include/network/ws_server.hpp +++ b/libraries/core_libs/network/include/network/ws_server.hpp @@ -49,11 +49,11 @@ class WsSession : public std::enable_shared_from_this { virtual std::string processRequest(const std::string_view& request) = 0; - void newEthBlock(::taraxa::final_chain::BlockHeader const& payload); - void newDagBlock(DagBlock const& blk); - void newDagBlockFinalized(blk_hash_t const& blk, uint64_t period); - void newPbftBlockExecuted(Json::Value const& payload); - void newPendingTransaction(trx_hash_t const& trx_hash); + void newEthBlock(const ::taraxa::final_chain::BlockHeader& payload, const TransactionHashes& trx_hashes); + void newDagBlock(const DagBlock& blk); + void newDagBlockFinalized(const blk_hash_t& blk, uint64_t period); + void newPbftBlockExecuted(const Json::Value& payload); + void newPendingTransaction(const trx_hash_t& trx_hash); bool is_closed() const { return closed_; } bool is_normal(const beast::error_code& ec) const; LOG_OBJECTS_DEFINE @@ -90,11 +90,11 @@ class WsServer : public std::enable_shared_from_this, public jsonrpc:: // Start accepting incoming connections void run(); - void newEthBlock(::taraxa::final_chain::BlockHeader const& payload); - void newDagBlock(DagBlock const& blk); - void newDagBlockFinalized(blk_hash_t const& blk, uint64_t period); - void newPbftBlockExecuted(PbftBlock const& sche_blk, std::vector const& finalized_dag_blk_hashes); - void newPendingTransaction(trx_hash_t const& trx_hash); + void newEthBlock(const ::taraxa::final_chain::BlockHeader& payload, const TransactionHashes& trx_hashes); + void newDagBlock(const DagBlock& blk); + void newDagBlockFinalized(const blk_hash_t& blk, uint64_t period); + void newPbftBlockExecuted(const PbftBlock& sche_blk, const std::vector& finalized_dag_blk_hashes); + void newPendingTransaction(const trx_hash_t& trx_hash); virtual std::shared_ptr createSession(tcp::socket&& socket) = 0; diff --git a/libraries/core_libs/network/rpc/CMakeLists.txt b/libraries/core_libs/network/rpc/CMakeLists.txt index 08056792fa..23273a886a 100644 --- a/libraries/core_libs/network/rpc/CMakeLists.txt +++ b/libraries/core_libs/network/rpc/CMakeLists.txt @@ -1,20 +1,20 @@ # Note: run make gen_rpc_stubs to re-generate rpc classes -include(EthDependencies) -include(EthExecutableHelper) +# include(EthDependencies) +# include(EthExecutableHelper) -find_program(ETH_JSON_RPC_STUB jsonrpcstub) +# find_program(ETH_JSON_RPC_STUB jsonrpcstub) -set(CPP_NAMESPACE taraxa::net) -file(GLOB API_DEF_FILES "${CMAKE_CURRENT_SOURCE_DIR}/*.jsonrpc.json") -add_custom_target(gen_rpc_stubs) -foreach (api_def_file ${API_DEF_FILES}) - get_filename_component(api_name ${api_def_file} NAME_WE) - jsonrpcstub_create( - gen_rpc_stubs ${api_name}.jsonrpc.json - ${CPP_NAMESPACE}::${api_name}Face - ${CMAKE_CURRENT_SOURCE_DIR} ${api_name}Face - ${CPP_NAMESPACE}::${api_name}Client - ${CMAKE_CURRENT_SOURCE_DIR} ${api_name}Client - ) -endforeach () +# set(CPP_NAMESPACE taraxa::net) +# file(GLOB API_DEF_FILES "${CMAKE_CURRENT_SOURCE_DIR}/*.jsonrpc.json") +# add_custom_target(gen_rpc_stubs) +# foreach (api_def_file ${API_DEF_FILES}) +# get_filename_component(api_name ${api_def_file} NAME_WE) +# jsonrpcstub_create( +# gen_rpc_stubs 
${api_name}.jsonrpc.json +# ${CPP_NAMESPACE}::${api_name}Face +# ${CMAKE_CURRENT_SOURCE_DIR} ${api_name}Face +# ${CPP_NAMESPACE}::${api_name}Client +# ${CMAKE_CURRENT_SOURCE_DIR} ${api_name}Client +# ) +# endforeach () diff --git a/libraries/core_libs/network/rpc/Debug.cpp b/libraries/core_libs/network/rpc/Debug.cpp index 1a79ed1de9..4e5e93b315 100644 --- a/libraries/core_libs/network/rpc/Debug.cpp +++ b/libraries/core_libs/network/rpc/Debug.cpp @@ -12,22 +12,22 @@ using namespace jsonrpc; using namespace taraxa; namespace taraxa::net { + +inline EthBlockNumber get_ctx_block_num(EthBlockNumber block_number) { + return (block_number >= 1) ? block_number - 1 : 0; +} + Json::Value Debug::debug_traceTransaction(const std::string& transaction_hash) { Json::Value res; try { + auto [trx, loc] = get_transaction_with_location(transaction_hash); + if (!trx || !loc) { + res["status"] = "Transaction not found"; + return res; + } if (auto node = full_node_.lock()) { - const auto hash = jsToFixed<32>(transaction_hash); - const auto trx = node->getDB()->getTransaction(hash); - if (!trx) { - res["status"] = "Transaction not found"; - return res; - } - const auto loc = node->getFinalChain()->transaction_location(hash); - if (!loc) { - res["status"] = "Transaction not found"; - return res; - } - return util::readJsonFromString(node->getFinalChain()->trace_trx(to_eth_trx(trx), loc->blk_n)); + return util::readJsonFromString( + node->getFinalChain()->trace({to_eth_trx(std::move(trx))}, get_ctx_block_num(loc->blk_n))); } } catch (std::exception& e) { res["status"] = e.what(); @@ -41,7 +41,7 @@ Json::Value Debug::debug_traceCall(const Json::Value& call_params, const std::st const auto block = parse_blk_num(blk_num); auto trx = to_eth_trx(call_params, block); if (auto node = full_node_.lock()) { - return util::readJsonFromString(node->getFinalChain()->trace_trx(std::move(trx), block)); + return util::readJsonFromString(node->getFinalChain()->trace({std::move(trx)}, block)); } } catch (std::exception& e) { res["status"] = e.what(); @@ -54,10 +54,53 @@ Json::Value Debug::trace_call(const Json::Value& call_params, const Json::Value& Json::Value res; try { const auto block = parse_blk_num(blk_num); - auto trx = to_eth_trx(call_params, block); auto params = parse_tracking_parms(trace_params); if (auto node = full_node_.lock()) { - return util::readJsonFromString(node->getFinalChain()->trace_trx(std::move(trx), block, std::move(params))); + return util::readJsonFromString( + node->getFinalChain()->trace({to_eth_trx(call_params, block)}, block, std::move(params))); + } + } catch (std::exception& e) { + res["status"] = e.what(); + } + return res; +} + +Json::Value Debug::trace_replayTransaction(const std::string& transaction_hash, const Json::Value& trace_params) { + Json::Value res; + try { + auto params = parse_tracking_parms(trace_params); + auto [trx, loc] = get_transaction_with_location(transaction_hash); + if (!trx || !loc) { + res["status"] = "Transaction not found"; + return res; + } + if (auto node = full_node_.lock()) { + return util::readJsonFromString( + node->getFinalChain()->trace({to_eth_trx(std::move(trx))}, get_ctx_block_num(loc->blk_n), std::move(params))); + } + } catch (std::exception& e) { + res["status"] = e.what(); + } + return res; +} + +Json::Value Debug::trace_replayBlockTransactions(const std::string& block_num, const Json::Value& trace_params) { + Json::Value res; + try { + const auto block = parse_blk_num(block_num); + auto params = parse_tracking_parms(trace_params); + if (auto node = 
full_node_.lock()) { + auto transactions = node->getDB()->getPeriodTransactions(block); + if (!transactions.has_value() || transactions->empty()) { + res["status"] = "Block has no transactions"; + return res; + } + std::vector trxs; + trxs.reserve(transactions->size()); + std::transform(transactions->begin(), transactions->end(), std::back_inserter(trxs), + [this](auto t) { return to_eth_trx(std::move(t)); }); + return util::readJsonFromString( + node->getFinalChain()->trace(std::move(trxs), get_ctx_block_num(block), std::move(params))); } } catch (std::exception& e) { res["status"] = e.what(); @@ -72,7 +115,8 @@ state_api::Tracing Debug::parse_tracking_parms(const Json::Value& json) const { } for (const auto& obj : json) { if (obj.asString() == "trace") ret.trace = true; - if (obj.asString() == "stateDiff") ret.stateDiff = true; + // Disabled for now + // if (obj.asString() == "stateDiff") ret.stateDiff = true; if (obj.asString() == "vmTrace") ret.vmTrace = true; } return ret; @@ -91,13 +135,13 @@ state_api::EVMTransaction Debug::to_eth_trx(const Json::Value& json, EthBlockNum } if (!json["from"].empty()) { - trx.from = toAddress(json["from"].asString()); + trx.from = to_address(json["from"].asString()); } else { trx.from = ZeroAddress; } if (!json["to"].empty() && json["to"].asString() != "0x" && !json["to"].asString().empty()) { - trx.to = toAddress(json["to"].asString()); + trx.to = to_address(json["to"].asString()); } if (!json["value"].empty()) { @@ -144,7 +188,7 @@ EthBlockNumber Debug::parse_blk_num(const string& blk_num_str) { return jsToInt(blk_num_str); } -Address Debug::toAddress(const string& s) const { +Address Debug::to_address(const string& s) const { try { if (auto b = fromHex(s.substr(0, 2) == "0x" ? s.substr(2) : s, WhenError::Throw); b.size() == Address::size) { return Address(b); @@ -154,4 +198,13 @@ Address Debug::toAddress(const string& s) const { throw InvalidAddress(); } +std::pair, std::optional> +Debug::get_transaction_with_location(const std::string& transaction_hash) const { + if (auto node = full_node_.lock()) { + const auto hash = jsToFixed<32>(transaction_hash); + return {node->getDB()->getTransaction(hash), node->getFinalChain()->transaction_location(hash)}; + } + return {}; +} + } // namespace taraxa::net \ No newline at end of file diff --git a/libraries/core_libs/network/rpc/Debug.h b/libraries/core_libs/network/rpc/Debug.h index 936e193177..d318f9deee 100644 --- a/libraries/core_libs/network/rpc/Debug.h +++ b/libraries/core_libs/network/rpc/Debug.h @@ -36,13 +36,17 @@ class Debug : public DebugFace { virtual Json::Value debug_traceCall(const Json::Value& param1, const std::string& param2) override; virtual Json::Value trace_call(const Json::Value& param1, const Json::Value& param2, const std::string& param3) override; + virtual Json::Value trace_replayTransaction(const std::string& param1, const Json::Value& param2) override; + virtual Json::Value trace_replayBlockTransactions(const std::string& param1, const Json::Value& param2) override; private: state_api::EVMTransaction to_eth_trx(std::shared_ptr t) const; state_api::EVMTransaction to_eth_trx(const Json::Value& json, EthBlockNumber blk_num); EthBlockNumber parse_blk_num(const string& blk_num_str); state_api::Tracing parse_tracking_parms(const Json::Value& json) const; - Address toAddress(const string& s) const; + Address to_address(const string& s) const; + std::pair, std::optional> + get_transaction_with_location(const std::string& transaction_hash) const; std::weak_ptr full_node_; const uint64_t 
kGasLimit = ((uint64_t)1 << 53) - 1; diff --git a/libraries/core_libs/network/rpc/Debug.jsonrpc.json b/libraries/core_libs/network/rpc/Debug.jsonrpc.json index 8a54721da9..88c5411af1 100644 --- a/libraries/core_libs/network/rpc/Debug.jsonrpc.json +++ b/libraries/core_libs/network/rpc/Debug.jsonrpc.json @@ -25,5 +25,23 @@ ], "order": [], "returns": {} + }, + { + "name": "trace_replayTransaction", + "params": [ + "", + [] + ], + "order": [], + "returns": {} + }, + { + "name": "trace_replayBlockTransactions", + "params": [ + "", + [] + ], + "order": [], + "returns": {} } ] \ No newline at end of file diff --git a/libraries/core_libs/network/rpc/DebugClient.h b/libraries/core_libs/network/rpc/DebugClient.h index 05d3904bd9..f0faaf58ec 100644 --- a/libraries/core_libs/network/rpc/DebugClient.h +++ b/libraries/core_libs/network/rpc/DebugClient.h @@ -45,6 +45,28 @@ class DebugClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } + Json::Value trace_replayTransaction(const std::string& param1, + const Json::Value& param2) throw(jsonrpc::JsonRpcException) { + Json::Value p; + p.append(param1); + p.append(param2); + Json::Value result = this->CallMethod("trace_replayTransaction", p); + if (result.isObject()) + return result; + else + throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); + } + Json::Value trace_replayBlockTransactions(const std::string& param1, + const Json::Value& param2) throw(jsonrpc::JsonRpcException) { + Json::Value p; + p.append(param1); + p.append(param2); + Json::Value result = this->CallMethod("trace_replayBlockTransactions", p); + if (result.isObject()) + return result; + else + throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); + } }; } // namespace net diff --git a/libraries/core_libs/network/rpc/DebugFace.h b/libraries/core_libs/network/rpc/DebugFace.h index 3411cecb00..fbfd16f363 100644 --- a/libraries/core_libs/network/rpc/DebugFace.h +++ b/libraries/core_libs/network/rpc/DebugFace.h @@ -22,6 +22,14 @@ class DebugFace : public ServerInterface { jsonrpc::Procedure("trace_call", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_OBJECT, "param2", jsonrpc::JSON_ARRAY, "param3", jsonrpc::JSON_STRING, NULL), &taraxa::net::DebugFace::trace_callI); + this->bindAndAddMethod( + jsonrpc::Procedure("trace_replayTransaction", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", + jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_ARRAY, NULL), + &taraxa::net::DebugFace::trace_replayTransactionI); + this->bindAndAddMethod( + jsonrpc::Procedure("trace_replayBlockTransactions", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", + jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_ARRAY, NULL), + &taraxa::net::DebugFace::trace_replayBlockTransactionsI); } inline virtual void debug_traceTransactionI(const Json::Value& request, Json::Value& response) { @@ -33,9 +41,17 @@ class DebugFace : public ServerInterface { inline virtual void trace_callI(const Json::Value& request, Json::Value& response) { response = this->trace_call(request[0u], request[1u], request[2u].asString()); } + inline virtual void trace_replayTransactionI(const Json::Value& request, Json::Value& response) { + response = this->trace_replayTransaction(request[0u].asString(), request[1u]); + } + inline virtual void trace_replayBlockTransactionsI(const Json::Value& request, Json::Value& response) 
{ + response = this->trace_replayBlockTransactions(request[0u].asString(), request[1u]); + } virtual Json::Value debug_traceTransaction(const std::string& param1) = 0; virtual Json::Value debug_traceCall(const Json::Value& param1, const std::string& param2) = 0; virtual Json::Value trace_call(const Json::Value& param1, const Json::Value& param2, const std::string& param3) = 0; + virtual Json::Value trace_replayTransaction(const std::string& param1, const Json::Value& param2) = 0; + virtual Json::Value trace_replayBlockTransactions(const std::string& param1, const Json::Value& param2) = 0; }; } // namespace net diff --git a/libraries/core_libs/network/rpc/Eth.jsonrpc.json b/libraries/core_libs/network/rpc/Eth.jsonrpc.json index eb1efbe28e..77d724669c 100644 --- a/libraries/core_libs/network/rpc/Eth.jsonrpc.json +++ b/libraries/core_libs/network/rpc/Eth.jsonrpc.json @@ -33,7 +33,7 @@ "name": "eth_getBalance", "params": [ "", - "" + {} ], "order": [], "returns": "" @@ -43,7 +43,7 @@ "params": [ "", "", - "" + {} ], "order": [], "returns": "" @@ -61,7 +61,7 @@ "name": "eth_getTransactionCount", "params": [ "", - "" + {} ], "order": [], "returns": "" @@ -102,7 +102,7 @@ "name": "eth_getCode", "params": [ "", - "" + {} ], "order": [], "returns": "" @@ -111,7 +111,7 @@ "name": "eth_call", "params": [ {}, - "" + {} ], "order": [], "returns": "" diff --git a/libraries/core_libs/network/rpc/EthClient.h b/libraries/core_libs/network/rpc/EthClient.h index 0e9a220298..47869dfe8b 100644 --- a/libraries/core_libs/network/rpc/EthClient.h +++ b/libraries/core_libs/network/rpc/EthClient.h @@ -59,7 +59,7 @@ class EthClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } - std::string eth_getBalance(const std::string& param1, const std::string& param2) throw(jsonrpc::JsonRpcException) { + std::string eth_getBalance(const std::string& param1, const Json::Value& param2) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); p.append(param2); @@ -70,7 +70,7 @@ class EthClient : public jsonrpc::Client { throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } std::string eth_getStorageAt(const std::string& param1, const std::string& param2, - const std::string& param3) throw(jsonrpc::JsonRpcException) { + const Json::Value& param3) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); p.append(param2); @@ -93,7 +93,7 @@ class EthClient : public jsonrpc::Client { throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } std::string eth_getTransactionCount(const std::string& param1, - const std::string& param2) throw(jsonrpc::JsonRpcException) { + const Json::Value& param2) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); p.append(param2); @@ -139,7 +139,7 @@ class EthClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } - std::string eth_getCode(const std::string& param1, const std::string& param2) throw(jsonrpc::JsonRpcException) { + std::string eth_getCode(const std::string& param1, const Json::Value& param2) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); p.append(param2); @@ -149,7 +149,7 @@ class EthClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } - std::string 
eth_call(const Json::Value& param1, const std::string& param2) throw(jsonrpc::JsonRpcException) { + std::string eth_call(const Json::Value& param1, const Json::Value& param2) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); p.append(param2); diff --git a/libraries/core_libs/network/rpc/EthFace.h b/libraries/core_libs/network/rpc/EthFace.h index d050fad608..d3d415636c 100644 --- a/libraries/core_libs/network/rpc/EthFace.h +++ b/libraries/core_libs/network/rpc/EthFace.h @@ -9,6 +9,8 @@ namespace taraxa { namespace net { +// Please read README +const int JSON_ANY = 0; class EthFace : public ServerInterface { public: EthFace() { @@ -25,24 +27,24 @@ class EthFace : public ServerInterface { jsonrpc::Procedure("eth_blockNumber", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_blockNumberI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBalance", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, - "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), + "param1", jsonrpc::JSON_STRING, "param2", JSON_ANY, NULL), &taraxa::net::EthFace::eth_getBalanceI); this->bindAndAddMethod( jsonrpc::Procedure("eth_getStorageAt", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", - jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, "param3", jsonrpc::JSON_STRING, NULL), + jsonrpc::JSON_STRING, "param2", JSON_ANY, "param3", JSON_ANY, NULL), &taraxa::net::EthFace::eth_getStorageAtI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getStorageRoot", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, - "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), + "param1", jsonrpc::JSON_STRING, "param2", JSON_ANY, NULL), &taraxa::net::EthFace::eth_getStorageRootI); this->bindAndAddMethod( jsonrpc::Procedure("eth_getTransactionCount", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", - jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), + jsonrpc::JSON_STRING, "param2", JSON_ANY, NULL), &taraxa::net::EthFace::eth_getTransactionCountI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBlockTransactionCountByHash", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_getBlockTransactionCountByHashI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBlockTransactionCountByNumber", jsonrpc::PARAMS_BY_POSITION, - jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), + jsonrpc::JSON_OBJECT, "param1", JSON_ANY, NULL), &taraxa::net::EthFace::eth_getBlockTransactionCountByNumberI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getUncleCountByBlockHash", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), @@ -51,16 +53,16 @@ class EthFace : public ServerInterface { jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_getUncleCountByBlockNumberI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getCode", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, - "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), + "param1", jsonrpc::JSON_STRING, "param2", JSON_ANY, NULL), &taraxa::net::EthFace::eth_getCodeI); this->bindAndAddMethod(jsonrpc::Procedure("eth_call", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", - jsonrpc::JSON_OBJECT, "param2", jsonrpc::JSON_STRING, NULL), + jsonrpc::JSON_OBJECT, "param2", JSON_ANY, NULL), &taraxa::net::EthFace::eth_callI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBlockByHash", 
jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_BOOLEAN, NULL), &taraxa::net::EthFace::eth_getBlockByHashI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBlockByNumber", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, - "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_BOOLEAN, NULL), + "param1", JSON_ANY, "param2", jsonrpc::JSON_BOOLEAN, NULL), &taraxa::net::EthFace::eth_getBlockByNumberI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getTransactionByHash", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), @@ -69,10 +71,9 @@ class EthFace : public ServerInterface { jsonrpc::Procedure("eth_getTransactionByBlockHashAndIndex", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_getTransactionByBlockHashAndIndexI); - this->bindAndAddMethod( - jsonrpc::Procedure("eth_getTransactionByBlockNumberAndIndex", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, - "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), - &taraxa::net::EthFace::eth_getTransactionByBlockNumberAndIndexI); + this->bindAndAddMethod(jsonrpc::Procedure("eth_getTransactionByBlockNumberAndIndex", jsonrpc::PARAMS_BY_POSITION, + jsonrpc::JSON_OBJECT, "param1", JSON_ANY, "param2", JSON_ANY, NULL), + &taraxa::net::EthFace::eth_getTransactionByBlockNumberAndIndexI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getTransactionReceipt", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_getTransactionReceiptI); @@ -138,16 +139,16 @@ class EthFace : public ServerInterface { response = this->eth_blockNumber(); } inline virtual void eth_getBalanceI(const Json::Value &request, Json::Value &response) { - response = this->eth_getBalance(request[0u].asString(), request[1u].asString()); + response = this->eth_getBalance(request[0u].asString(), request[1u]); } inline virtual void eth_getStorageAtI(const Json::Value &request, Json::Value &response) { - response = this->eth_getStorageAt(request[0u].asString(), request[1u].asString(), request[2u].asString()); + response = this->eth_getStorageAt(request[0u].asString(), request[1u].asString(), request[2u]); } inline virtual void eth_getStorageRootI(const Json::Value &request, Json::Value &response) { response = this->eth_getStorageRoot(request[0u].asString(), request[1u].asString()); } inline virtual void eth_getTransactionCountI(const Json::Value &request, Json::Value &response) { - response = this->eth_getTransactionCount(request[0u].asString(), request[1u].asString()); + response = this->eth_getTransactionCount(request[0u].asString(), request[1u]); } inline virtual void eth_getBlockTransactionCountByHashI(const Json::Value &request, Json::Value &response) { response = this->eth_getBlockTransactionCountByHash(request[0u].asString()); @@ -162,10 +163,10 @@ class EthFace : public ServerInterface { response = this->eth_getUncleCountByBlockNumber(request[0u].asString()); } inline virtual void eth_getCodeI(const Json::Value &request, Json::Value &response) { - response = this->eth_getCode(request[0u].asString(), request[1u].asString()); + response = this->eth_getCode(request[0u].asString(), request[1u]); } inline virtual void eth_callI(const Json::Value &request, Json::Value &response) { - response = this->eth_call(request[0u], request[1u].asString()); + response = this->eth_call(request[0u], request[1u]); } 
inline virtual void eth_getBlockByHashI(const Json::Value &request, Json::Value &response) { response = this->eth_getBlockByHash(request[0u].asString(), request[1u].asBool()); @@ -233,17 +234,17 @@ class EthFace : public ServerInterface { virtual std::string eth_gasPrice() = 0; virtual Json::Value eth_accounts() = 0; virtual std::string eth_blockNumber() = 0; - virtual std::string eth_getBalance(const std::string &param1, const std::string &param2) = 0; + virtual std::string eth_getBalance(const std::string &param1, const Json::Value &param2) = 0; virtual std::string eth_getStorageAt(const std::string &param1, const std::string &param2, - const std::string &param3) = 0; + const Json::Value &param3) = 0; virtual std::string eth_getStorageRoot(const std::string &param1, const std::string &param2) = 0; - virtual std::string eth_getTransactionCount(const std::string &param1, const std::string &param2) = 0; + virtual std::string eth_getTransactionCount(const std::string &param1, const Json::Value &param2) = 0; virtual Json::Value eth_getBlockTransactionCountByHash(const std::string &param1) = 0; virtual Json::Value eth_getBlockTransactionCountByNumber(const std::string &param1) = 0; virtual Json::Value eth_getUncleCountByBlockHash(const std::string &param1) = 0; virtual Json::Value eth_getUncleCountByBlockNumber(const std::string &param1) = 0; - virtual std::string eth_getCode(const std::string &param1, const std::string &param2) = 0; - virtual std::string eth_call(const Json::Value &param1, const std::string &param2) = 0; + virtual std::string eth_getCode(const std::string &param1, const Json::Value &param2) = 0; + virtual std::string eth_call(const Json::Value &param1, const Json::Value &param2) = 0; virtual Json::Value eth_getBlockByHash(const std::string &param1, bool param2) = 0; virtual Json::Value eth_getBlockByNumber(const std::string &param1, bool param2) = 0; virtual Json::Value eth_getTransactionByHash(const std::string &param1) = 0; diff --git a/libraries/core_libs/network/rpc/README.md b/libraries/core_libs/network/rpc/README.md new file mode 100644 index 0000000000..761ae481ff --- /dev/null +++ b/libraries/core_libs/network/rpc/README.md @@ -0,0 +1,9 @@ +# DISABLED !!!! How to generate new API interface +``` +sudo apt install libjsonrpccpp-tools +make gen_rpc_stubs +make clang-format +``` + +# PLEASE READ +As libjsonrpccpp doesn't support overloading function arguments, I had to make a hack and introduce `JSON_ANY`. `jsonrpc::Procedure` validates the types of the passed arguments, but since its parameter-type field is not a class enum, any value outside the enum range (anything above 7, or 0) disables that check; for details see https://github.com/cinemast/libjson-rpc-cpp/d5ede2277d849f1a9d2dc111c4ec3ea652bd31ec/src/jsonrpccpp/common/specification.h#L29 .
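For illustration, this is how the hack looks at a call site in `EthFace.h` (excerpted from this PR; `JSON_ANY` is the `const int JSON_ANY = 0;` defined just above the class). Passing `JSON_ANY` in place of a `jsonrpc::jsontype_t` value turns off the type check for that positional parameter, so `eth_getBalance` can take either a block-tag string such as `"latest"` or an object carrying `blockNumber`/`blockHash`:
```cpp
// JSON_ANY (0) is outside the jsontype_t range, so libjsonrpccpp skips
// type validation for "param2" and hands the raw Json::Value through.
this->bindAndAddMethod(jsonrpc::Procedure("eth_getBalance", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING,
                                          "param1", jsonrpc::JSON_STRING, "param2", JSON_ANY, NULL),
                       &taraxa::net::EthFace::eth_getBalanceI);
```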
That's why generating the stubs via `gen_rpc_stubs` is disabled: it would overwrite the hack in `EthFace.h`. \ No newline at end of file diff --git a/libraries/core_libs/network/rpc/Taraxa.cpp b/libraries/core_libs/network/rpc/Taraxa.cpp index 87267ce200..7fe42bc23d 100644 --- a/libraries/core_libs/network/rpc/Taraxa.cpp +++ b/libraries/core_libs/network/rpc/Taraxa.cpp @@ -137,4 +137,18 @@ Json::Value Taraxa::taraxa_getDagBlockByLevel(const string& _blockLevel, bool _i } Json::Value Taraxa::taraxa_getConfig() { return enc_json(tryGetNode()->getConfig().genesis); } + +Json::Value Taraxa::taraxa_getChainStats() { + Json::Value res; + try { + if (auto node = full_node_.lock()) { + res["pbft_period"] = Json::UInt64(node->getPbftChain()->getPbftChainSize()); + res["dag_blocks_executed"] = Json::UInt64(node->getDB()->getNumBlockExecuted()); + res["transactions_executed"] = Json::UInt64(node->getDB()->getNumTransactionExecuted()); + } + } catch (std::exception& e) { + res["status"] = e.what(); + } + return res; +} } // namespace taraxa::net \ No newline at end of file diff --git a/libraries/core_libs/network/rpc/Taraxa.h b/libraries/core_libs/network/rpc/Taraxa.h index cfb774a28e..39b32cca34 100644 --- a/libraries/core_libs/network/rpc/Taraxa.h +++ b/libraries/core_libs/network/rpc/Taraxa.h @@ -28,6 +28,7 @@ class Taraxa : public TaraxaFace { virtual Json::Value taraxa_getScheduleBlockByPeriod(const std::string& _period) override; virtual std::string taraxa_pbftBlockHashByPeriod(const std::string& _period) override; virtual Json::Value taraxa_getConfig() override; + virtual Json::Value taraxa_getChainStats() override; protected: std::weak_ptr<FullNode> full_node_; diff --git a/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json b/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json index 48dca28de3..15e5b2135c 100644 --- a/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json +++ b/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json @@ -53,6 +53,12 @@ "order": [], "returns": {} }, + { + "name": "taraxa_getChainStats", + "params": [], + "order": [], + "returns": {} + }, { "name": "taraxa_pbftBlockHashByPeriod", "params": [""], diff --git a/libraries/core_libs/network/rpc/TaraxaClient.h b/libraries/core_libs/network/rpc/TaraxaClient.h index 91d49c94dd..a8a3c105a1 100644 --- a/libraries/core_libs/network/rpc/TaraxaClient.h +++ b/libraries/core_libs/network/rpc/TaraxaClient.h @@ -88,6 +88,15 @@ class TaraxaClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } + Json::Value taraxa_getChainStats() throw(jsonrpc::JsonRpcException) { + Json::Value p; + p = Json::nullValue; + Json::Value result = this->CallMethod("taraxa_getChainStats", p); + if (result.isObject()) + return result; + else + throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); + } std::string taraxa_pbftBlockHashByPeriod(const std::string& param1) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); diff --git a/libraries/core_libs/network/rpc/TaraxaFace.h b/libraries/core_libs/network/rpc/TaraxaFace.h index bd53cf7b08..acf4d29c0d 100644 --- a/libraries/core_libs/network/rpc/TaraxaFace.h +++ b/libraries/core_libs/network/rpc/TaraxaFace.h @@ -38,6 +38,9 @@ class TaraxaFace : public ServerInterface { this->bindAndAddMethod( jsonrpc::Procedure("taraxa_getConfig", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL), &taraxa::net::TaraxaFace::taraxa_getConfigI);
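// (editor's note, not part of the PR) An illustrative exchange for the new
// endpoint bound below, assuming a node with its HTTP JSON-RPC exposed
// locally; the numbers are made up:
//   request:  {"jsonrpc":"2.0","id":1,"method":"taraxa_getChainStats","params":[]}
//   response: {"jsonrpc":"2.0","id":1,"result":{"pbft_period":12345,
//              "dag_blocks_executed":67890,"transactions_executed":424242}}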
jsonrpc::Procedure("taraxa_getChainStats", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL), + &taraxa::net::TaraxaFace::taraxa_getChainStatsI); this->bindAndAddMethod(jsonrpc::Procedure("taraxa_pbftBlockHashByPeriod", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", jsonrpc::JSON_STRING, NULL), &taraxa::net::TaraxaFace::taraxa_pbftBlockHashByPeriodI); @@ -72,6 +75,10 @@ class TaraxaFace : public ServerInterface { (void)request; response = this->taraxa_getConfig(); } + inline virtual void taraxa_getChainStatsI(const Json::Value &request, Json::Value &response) { + (void)request; + response = this->taraxa_getChainStats(); + } inline virtual void taraxa_pbftBlockHashByPeriodI(const Json::Value &request, Json::Value &response) { response = this->taraxa_pbftBlockHashByPeriod(request[0u].asString()); } @@ -83,6 +90,7 @@ class TaraxaFace : public ServerInterface { virtual std::string taraxa_dagBlockPeriod() = 0; virtual Json::Value taraxa_getScheduleBlockByPeriod(const std::string ¶m1) = 0; virtual Json::Value taraxa_getConfig() = 0; + virtual Json::Value taraxa_getChainStats() = 0; virtual std::string taraxa_pbftBlockHashByPeriod(const std::string ¶m1) = 0; }; diff --git a/libraries/core_libs/network/rpc/Test.cpp b/libraries/core_libs/network/rpc/Test.cpp index cb43a78b3d..0448abd06a 100644 --- a/libraries/core_libs/network/rpc/Test.cpp +++ b/libraries/core_libs/network/rpc/Test.cpp @@ -77,9 +77,8 @@ Json::Value Test::send_coin_transactions(const Json::Value ¶m1) { auto gas = dev::jsToInt(param1["gas"].asString()); auto transactions_count = param1["transaction_count"].asUInt64(); std::vector receivers; - for (auto rec : param1["receiver"]) { - receivers.emplace_back(addr_t(rec.asString())); - } + std::transform(param1["receiver"].begin(), param1["receiver"].end(), std::back_inserter(receivers), + [](const auto rec) { return addr_t(rec.asString()); }); for (uint32_t i = 0; i < transactions_count; i++) { auto trx = std::make_shared(nonce, value, gas_price, gas, bytes(), sk, receivers[i % receivers.size()], kChainId); diff --git a/libraries/core_libs/network/rpc/eth/Eth.cpp b/libraries/core_libs/network/rpc/eth/Eth.cpp index 87cc70bfef..735217b279 100644 --- a/libraries/core_libs/network/rpc/eth/Eth.cpp +++ b/libraries/core_libs/network/rpc/eth/Eth.cpp @@ -8,11 +8,106 @@ #include "LogFilter.hpp" +using namespace std; +using namespace dev; +using namespace taraxa::final_chain; +using namespace taraxa::state_api; + namespace taraxa::net::rpc::eth { -using namespace ::std; -using namespace ::dev; -using namespace ::taraxa::final_chain; -using namespace ::taraxa::state_api; +void add(Json::Value& obj, const optional& info) { + obj["blockNumber"] = info ? toJS(info->blk_n) : Json::Value(); + obj["blockHash"] = info ? toJS(info->blk_h) : Json::Value(); + obj["transactionIndex"] = info ? 
toJS(info->index) : Json::Value(); +} + +void add(Json::Value& obj, const ExtendedTransactionLocation& info) { + add(obj, static_cast(info)); + obj["transactionHash"] = toJS(info.trx_hash); +} + +Json::Value toJson(const Transaction& trx, const optional& loc) { + Json::Value res(Json::objectValue); + add(res, loc); + res["hash"] = toJS(trx.getHash()); + res["input"] = toJS(trx.getData()); + res["to"] = toJson(trx.getReceiver()); + res["from"] = toJS(trx.getSender()); + res["gas"] = toJS(trx.getGas()); + res["gasPrice"] = toJS(trx.getGasPrice()); + res["nonce"] = toJS(trx.getNonce()); + res["value"] = toJS(trx.getValue()); + const auto& vrs = trx.getVRS(); + res["r"] = toJS(vrs.r); + res["s"] = toJS(vrs.s); + res["v"] = toJS(vrs.v); + return res; +} + +Json::Value toJson(const LocalisedTransaction& lt) { return toJson(*lt.trx, lt.trx_loc); } + +Json::Value toJson(const BlockHeader& obj) { + Json::Value res(Json::objectValue); + res["parentHash"] = toJS(obj.parent_hash); + res["sha3Uncles"] = toJS(BlockHeader::uncles_hash()); + res["stateRoot"] = toJS(obj.state_root); + res["transactionsRoot"] = toJS(obj.transactions_root); + res["receiptsRoot"] = toJS(obj.receipts_root); + res["number"] = toJS(obj.number); + res["gasUsed"] = toJS(obj.gas_used); + res["gasLimit"] = toJS(obj.gas_limit); + res["extraData"] = toJS(obj.extra_data); + res["logsBloom"] = toJS(obj.log_bloom); + res["timestamp"] = toJS(obj.timestamp); + res["author"] = toJS(obj.author); + res["mixHash"] = toJS(BlockHeader::mix_hash()); + res["nonce"] = toJS(BlockHeader::nonce()); + res["uncles"] = Json::Value(Json::arrayValue); + res["hash"] = toJS(obj.hash); + res["difficulty"] = "0x0"; + res["totalDifficulty"] = "0x0"; + res["totalReward"] = toJS(obj.total_reward); + return res; +} + +Json::Value toJson(const LocalisedLogEntry& lle) { + Json::Value res(Json::objectValue); + add(res, lle.trx_loc); + res["removed"] = false; + res["data"] = toJS(lle.le.data); + res["address"] = toJS(lle.le.address); + res["logIndex"] = toJS(lle.position_in_receipt); + auto& topics_json = res["topics"] = Json::Value(Json::arrayValue); + for (const auto& t : lle.le.topics) { + topics_json.append(toJS(t)); + } + return res; +} + +Json::Value toJson(const LocalisedTransactionReceipt& ltr) { + Json::Value res(Json::objectValue); + add(res, ltr.trx_loc); + res["from"] = toJS(ltr.trx_from); + res["to"] = toJson(ltr.trx_to); + res["status"] = toJS(ltr.r.status_code); + res["gasUsed"] = toJS(ltr.r.gas_used); + res["cumulativeGasUsed"] = toJS(ltr.r.cumulative_gas_used); + res["contractAddress"] = toJson(ltr.r.new_contract_address); + res["logsBloom"] = toJS(ltr.r.bloom()); + auto& logs_json = res["logs"] = Json::Value(Json::arrayValue); + uint log_i = 0; + for (const auto& le : ltr.r.logs) { + logs_json.append(toJson(LocalisedLogEntry{le, ltr.trx_loc, log_i++})); + } + return res; +} + +Json::Value toJson(const SyncStatus& obj) { + Json::Value res(Json::objectValue); + res["startingBlock"] = toJS(obj.starting_block); + res["currentBlock"] = toJS(obj.current_block); + res["highestBlock"] = toJS(obj.highest_block); + return res; +} class EthImpl : public Eth, EthParams { Watches watches_; @@ -32,96 +127,134 @@ class EthImpl : public Eth, EthParams { string eth_blockNumber() override { return toJS(final_chain->last_block_number()); } - string eth_getBalance(string const& _address, string const& _blockNumber) override { - return toJS( - final_chain->get_account(toAddress(_address), parse_blk_num(_blockNumber)).value_or(ZeroAccount).balance); + string 
eth_getBalance(const string& _address, const Json::Value& _json) override { + const auto block_number = get_block_number_from_json(_json); + return toJS(final_chain->get_account(toAddress(_address), block_number).value_or(ZeroAccount).balance); } - string eth_getStorageAt(string const& _address, string const& _position, string const& _blockNumber) override { - return toJS( - final_chain->get_account_storage(toAddress(_address), jsToU256(_position), parse_blk_num(_blockNumber))); + string eth_getStorageAt(const string& _address, const string& _position, const Json::Value& _json) override { + const auto block_number = get_block_number_from_json(_json); + return toJS(final_chain->get_account_storage(toAddress(_address), jsToU256(_position), block_number)); } - string eth_getStorageRoot(string const& _address, string const& _blockNumber) override { + string eth_getStorageRoot(const string& _address, const string& _blockNumber) override { return toJS(final_chain->get_account(toAddress(_address), parse_blk_num(_blockNumber)) .value_or(ZeroAccount) .storage_root_eth()); } - string eth_getCode(string const& _address, string const& _blockNumber) override { - return toJS(final_chain->get_code(toAddress(_address), parse_blk_num(_blockNumber))); + string eth_getCode(const string& _address, const Json::Value& _json) override { + const auto block_number = get_block_number_from_json(_json); + return toJS(final_chain->get_code(toAddress(_address), block_number)); } - string eth_call(Json::Value const& _json, string const& _blockNumber) override { + string eth_call(const Json::Value& _json, const Json::Value& _jsonBlock) override { + const auto block_number = get_block_number_from_json(_jsonBlock); auto t = toTransactionSkeleton(_json); - auto blk_n = parse_blk_num(_blockNumber); - prepare_transaction_for_call(t, blk_n); - return toJS(call(blk_n, t).code_retval); + prepare_transaction_for_call(t, block_number); + auto ret = call(block_number, t); + if (!ret.consensus_err.empty() || !ret.code_err.empty()) { + throw std::runtime_error(ret.consensus_err.empty() ? ret.code_err : ret.consensus_err); + } + return toJS(ret.code_retval); } - string eth_estimateGas(Json::Value const& _json) override { + string eth_estimateGas(const Json::Value& _json) override { auto t = toTransactionSkeleton(_json); auto blk_n = final_chain->last_block_number(); prepare_transaction_for_call(t, blk_n); - return toJS(call(blk_n, t).gas_used); + + auto is_enough_gas = [&](gas_t gas) -> bool { + t.gas = gas; + auto res = call(blk_n, t); + if (!res.consensus_err.empty()) { + throw std::runtime_error(res.consensus_err); + } + if (!res.code_err.empty()) { + return false; + } + return true; + }; + // couldn't be lower than execution gas_used. So we should start with this value + auto call_result = call(blk_n, t); + if (!call_result.consensus_err.empty() || !call_result.code_err.empty()) { + throw std::runtime_error(call_result.consensus_err.empty() ? 
call_result.code_err : call_result.consensus_err); + } + gas_t low = call_result.gas_used; + gas_t hi = *t.gas; + if (low > hi) { + throw std::runtime_error("out of gas"); + } + // precision is 5%(1/20) of higher gas_used value + while (hi - low > hi / 20) { + auto mid = low + ((hi - low) / 2); + + if (is_enough_gas(mid)) { + hi = mid; + } else { + low = mid; + } + } + return toJS(hi); } - string eth_getTransactionCount(string const& _address, string const& _blockNumber) override { - return toJS(transaction_count(parse_blk_num(_blockNumber), toAddress(_address))); + string eth_getTransactionCount(const string& _address, const Json::Value& _json) override { + const auto block_number = get_block_number_from_json(_json); + return toJS(transaction_count(block_number, toAddress(_address))); } - Json::Value eth_getBlockTransactionCountByHash(string const& _blockHash) override { - return toJson(transactionCount(jsToFixed<32>(_blockHash))); + Json::Value eth_getBlockTransactionCountByHash(const string& _blockHash) override { + return toJS(transactionCount(jsToFixed<32>(_blockHash))); } - Json::Value eth_getBlockTransactionCountByNumber(string const& _blockNumber) override { + Json::Value eth_getBlockTransactionCountByNumber(const string& _blockNumber) override { return toJS(final_chain->transactionCount(parse_blk_num(_blockNumber))); } - Json::Value eth_getUncleCountByBlockHash(string const&) override { return toJS(0); } + Json::Value eth_getUncleCountByBlockHash(const string&) override { return toJS(0); } - Json::Value eth_getUncleCountByBlockNumber(string const&) override { return toJS(0); } + Json::Value eth_getUncleCountByBlockNumber(const string&) override { return toJS(0); } - string eth_sendRawTransaction(string const& _rlp) override { + string eth_sendRawTransaction(const string& _rlp) override { auto trx = std::make_shared(jsToBytes(_rlp, OnFailed::Throw), true); send_trx(trx); return toJS(trx->getHash()); } - Json::Value eth_getBlockByHash(string const& _blockHash, bool _includeTransactions) override { + Json::Value eth_getBlockByHash(const string& _blockHash, bool _includeTransactions) override { if (auto blk_n = final_chain->block_number(jsToFixed<32>(_blockHash)); blk_n) { return get_block_by_number(*blk_n, _includeTransactions); } return Json::Value(); } - Json::Value eth_getBlockByNumber(string const& _blockNumber, bool _includeTransactions) override { + Json::Value eth_getBlockByNumber(const string& _blockNumber, bool _includeTransactions) override { return get_block_by_number(parse_blk_num(_blockNumber), _includeTransactions); } - Json::Value eth_getTransactionByHash(string const& _transactionHash) override { + Json::Value eth_getTransactionByHash(const string& _transactionHash) override { return toJson(get_transaction(jsToFixed<32>(_transactionHash))); } - Json::Value eth_getTransactionByBlockHashAndIndex(string const& _blockHash, - string const& _transactionIndex) override { + Json::Value eth_getTransactionByBlockHashAndIndex(const string& _blockHash, + const string& _transactionIndex) override { return toJson(get_transaction(jsToFixed<32>(_blockHash), jsToInt(_transactionIndex))); } - Json::Value eth_getTransactionByBlockNumberAndIndex(string const& _blockNumber, - string const& _transactionIndex) override { + Json::Value eth_getTransactionByBlockNumberAndIndex(const string& _blockNumber, + const string& _transactionIndex) override { return toJson(get_transaction(jsToInt(_transactionIndex), parse_blk_num(_blockNumber))); } - Json::Value eth_getTransactionReceipt(string const& 
_transactionHash) override { + Json::Value eth_getTransactionReceipt(const string& _transactionHash) override { return toJson(get_transaction_receipt(jsToFixed<32>(_transactionHash))); } - Json::Value eth_getUncleByBlockHashAndIndex(string const&, string const&) override { return Json::Value(); } + Json::Value eth_getUncleByBlockHashAndIndex(const string&, const string&) override { return Json::Value(); } - Json::Value eth_getUncleByBlockNumberAndIndex(string const&, string const&) override { return Json::Value(); } + Json::Value eth_getUncleByBlockNumberAndIndex(const string&, const string&) override { return Json::Value(); } - string eth_newFilter(Json::Value const& _json) override { + string eth_newFilter(const Json::Value& _json) override { return toJS(watches_.logs_.install_watch(parse_log_filter(_json))); } @@ -129,26 +262,26 @@ class EthImpl : public Eth, EthParams { string eth_newPendingTransactionFilter() override { return toJS(watches_.new_transactions_.install_watch()); } - bool eth_uninstallFilter(string const& _filterId) override { + bool eth_uninstallFilter(const string& _filterId) override { auto watch_id = jsToInt(_filterId); return watches_.visit_by_id(watch_id, [=](auto watch) { return watch && watch->uninstall_watch(watch_id); }); } - Json::Value eth_getFilterChanges(string const& _filterId) override { + Json::Value eth_getFilterChanges(const string& _filterId) override { auto watch_id = jsToInt(_filterId); return watches_.visit_by_id(watch_id, [=](auto watch) { return watch ? toJsonArray(watch->poll(watch_id)) : Json::Value(Json::arrayValue); }); } - Json::Value eth_getFilterLogs(string const& _filterId) override { + Json::Value eth_getFilterLogs(const string& _filterId) override { if (auto filter = watches_.logs_.get_watch_params(jsToInt(_filterId))) { return toJsonArray(filter->match_all(*final_chain)); } return Json::Value(Json::arrayValue); } - Json::Value eth_getLogs(Json::Value const& _json) override { + Json::Value eth_getLogs(const Json::Value& _json) override { return toJsonArray(parse_log_filter(_json).match_all(*final_chain)); } @@ -159,8 +292,8 @@ class EthImpl : public Eth, EthParams { Json::Value eth_chainId() override { return chain_id ? 
Json::Value(toJS(chain_id)) : Json::Value(); } - void note_block_executed(BlockHeader const& blk_header, SharedTransactions const& trxs, - TransactionReceipts const& receipts) override { + void note_block_executed(const BlockHeader& blk_header, const SharedTransactions& trxs, + const TransactionReceipts& receipts) override { watches_.new_blocks_.process_update(blk_header.hash); ExtendedTransactionLocation trx_loc{{{blk_header.number}, blk_header.hash}}; for (; trx_loc.index < trxs.size(); ++trx_loc.index) { @@ -170,7 +303,7 @@ class EthImpl : public Eth, EthParams { } } - void note_pending_transaction(h256 const& trx_hash) override { watches_.new_transactions_.process_update(trx_hash); } + void note_pending_transaction(const h256& trx_hash) override { watches_.new_transactions_.process_update(trx_hash); } Json::Value get_block_by_number(EthBlockNumber blk_n, bool include_transactions) { auto blk_header = final_chain->block_header(blk_n); @@ -183,20 +316,18 @@ class EthImpl : public Eth, EthParams { ExtendedTransactionLocation loc; loc.blk_n = blk_header->number; loc.blk_h = blk_header->hash; - for (auto const& t : final_chain->transactions(blk_n)) { + for (const auto& t : final_chain->transactions(blk_n)) { trxs_json.append(toJson(*t, loc)); ++loc.index; } } else { auto hashes = final_chain->transaction_hashes(blk_n); - for (size_t i = 0; i < hashes->count(); ++i) { - trxs_json.append(toJson(hashes->get(i))); - } + trxs_json = toJsonArray(*hashes); } return ret; } - optional get_transaction(h256 const& h) const { + optional get_transaction(const h256& h) const { auto trx = get_trx(h); if (!trx) { return {}; @@ -213,11 +344,11 @@ class EthImpl : public Eth, EthParams { optional get_transaction(uint64_t trx_pos, EthBlockNumber blk_n) const { auto hashes = final_chain->transaction_hashes(blk_n); - if (hashes->count() <= trx_pos) { + if (hashes->size() <= trx_pos) { return {}; } return LocalisedTransaction{ - get_trx(hashes->get(trx_pos)), + get_trx(hashes->at(trx_pos)), TransactionLocationWithBlockHash{ {blk_n, trx_pos}, *final_chain->block_hash(blk_n), @@ -225,18 +356,18 @@ class EthImpl : public Eth, EthParams { }; } - optional get_transaction(h256 const& blk_h, uint64_t _i) const { + optional get_transaction(const h256& blk_h, uint64_t _i) const { auto blk_n = final_chain->block_number(blk_h); return blk_n ? get_transaction(_i, *blk_n) : nullopt; } - optional get_transaction_receipt(h256 const& trx_h) const { + optional get_transaction_receipt(const h256& trx_h) const { auto r = final_chain->transaction_receipt(trx_h); if (!r) { return {}; } auto loc_trx = get_transaction(trx_h); - auto const& trx = loc_trx->trx; + const auto& trx = loc_trx->trx; return LocalisedTransactionReceipt{ *r, ExtendedTransactionLocation{*loc_trx->trx_loc, trx_h}, @@ -245,16 +376,16 @@ class EthImpl : public Eth, EthParams { }; } - uint64_t transactionCount(h256 const& block_hash) const { + uint64_t transactionCount(const h256& block_hash) const { auto n = final_chain->block_number(block_hash); return n ? 
final_chain->transactionCount(n) : 0; } - trx_nonce_t transaction_count(EthBlockNumber n, Address const& addr) { + trx_nonce_t transaction_count(EthBlockNumber n, const Address& addr) { return final_chain->get_account(addr, n).value_or(ZeroAccount).nonce; } - state_api::ExecutionResult call(EthBlockNumber blk_n, TransactionSkeleton const& trx) { + state_api::ExecutionResult call(EthBlockNumber blk_n, const TransactionSkeleton& trx) { const auto result = final_chain->call( { trx.from, @@ -267,10 +398,7 @@ class EthImpl : public Eth, EthParams { }, blk_n); - if (result.consensus_err.empty() && result.code_err.empty()) { - return result; - } - throw std::runtime_error(result.consensus_err.empty() ? result.code_err : result.consensus_err); + return result; } // this should be used only in eth_call and eth_estimateGas @@ -290,7 +418,7 @@ class EthImpl : public Eth, EthParams { } DEV_SIMPLE_EXCEPTION(InvalidAddress); - static Address toAddress(string const& s) { + static Address toAddress(const string& s) { try { if (auto b = fromHex(s.substr(0, 2) == "0x" ? s.substr(2) : s, WhenError::Throw); b.size() == Address::size) { return Address(b); @@ -300,7 +428,7 @@ class EthImpl : public Eth, EthParams { BOOST_THROW_EXCEPTION(InvalidAddress()); } - static TransactionSkeleton toTransactionSkeleton(Json::Value const& _json) { + static TransactionSkeleton toTransactionSkeleton(const Json::Value& _json) { TransactionSkeleton ret; if (!_json.isObject() || _json.empty()) { return ret; @@ -332,45 +460,60 @@ class EthImpl : public Eth, EthParams { return ret; } - static optional parse_blk_num_specific(string const& blk_num_str) { - if (blk_num_str == "latest" || blk_num_str == "pending") { + static optional parse_blk_num_specific(const string& blk_num_str) { + if (blk_num_str == "latest" || blk_num_str == "pending" || blk_num_str == "safe" || blk_num_str == "finalized") { return std::nullopt; } return blk_num_str == "earliest" ? 0 : jsToInt(blk_num_str); } - EthBlockNumber parse_blk_num(string const& blk_num_str) { + EthBlockNumber parse_blk_num(const string& blk_num_str) { auto ret = parse_blk_num_specific(blk_num_str); return ret ? 
*ret : final_chain->last_block_number(); } - LogFilter parse_log_filter(Json::Value const& json) { + EthBlockNumber get_block_number_from_json(const Json::Value& json) { + if (json.isObject()) { + if (!json["blockNumber"].empty()) { + return parse_blk_num(json["blockNumber"].asString()); + } + if (!json["blockHash"].empty()) { + if (auto ret = final_chain->block_number(jsToFixed<32>(json["blockHash"].asString()))) { + return *ret; + } + throw std::runtime_error("Resource not found"); + } + } + return parse_blk_num(json.asString()); + } + + LogFilter parse_log_filter(const Json::Value& json) { EthBlockNumber from_block; optional to_block; AddressSet addresses; LogFilter::Topics topics; - if (auto const& fromBlock = json["fromBlock"]; !fromBlock.empty()) { + if (const auto& fromBlock = json["fromBlock"]; !fromBlock.empty()) { from_block = parse_blk_num(fromBlock.asString()); } else { from_block = final_chain->last_block_number(); } - if (auto const& toBlock = json["toBlock"]; !toBlock.empty()) { + if (const auto& toBlock = json["toBlock"]; !toBlock.empty()) { to_block = parse_blk_num_specific(toBlock.asString()); } - if (auto const& address = json["address"]; !address.empty()) { + if (const auto& address = json["address"]; !address.empty()) { if (address.isArray()) { - for (auto const& obj : address) { + for (const auto& obj : address) { addresses.insert(toAddress(obj.asString())); } } else { addresses.insert(toAddress(address.asString())); } } - if (auto const& topics_json = json["topics"]; !topics_json.empty()) { + if (const auto& topics_json = json["topics"]; !topics_json.empty()) { for (uint32_t i = 0; i < topics_json.size(); i++) { - auto const& topic_json = topics_json[i]; + const auto& topic_json = topics_json[i]; if (topic_json.isArray()) { - for (auto const& t : topic_json) { + for (const auto& t : topic_json) { if (!t.isNull()) { topics[i].insert(jsToFixed<32>(t.asString())); } @@ -382,124 +525,8 @@ class EthImpl : public Eth, EthParams { } return LogFilter(from_block, to_block, std::move(addresses), std::move(topics)); } - - static void add(Json::Value& obj, optional const& info) { - obj["blockNumber"] = info ? toJson(info->blk_n) : Json::Value(); - obj["blockHash"] = info ? toJson(info->blk_h) : Json::Value(); - obj["transactionIndex"] = info ? 
toJson(info->index) : Json::Value(); - } - - static void add(Json::Value& obj, ExtendedTransactionLocation const& info) { - add(obj, static_cast(info)); - obj["transactionHash"] = toJson(info.trx_hash); - } - - static Json::Value toJson(Transaction const& trx, optional const& loc) { - Json::Value res(Json::objectValue); - add(res, loc); - res["hash"] = toJson(trx.getHash()); - res["input"] = toJson(trx.getData()); - res["to"] = toJson(trx.getReceiver()); - res["from"] = toJson(trx.getSender()); - res["gas"] = toJson(trx.getGas()); - res["gasPrice"] = toJson(trx.getGasPrice()); - res["nonce"] = toJson(trx.getNonce()); - res["value"] = toJson(trx.getValue()); - auto const& vrs = trx.getVRS(); - res["r"] = toJson(vrs.r); - res["s"] = toJson(vrs.s); - res["v"] = toJson(vrs.v); - return res; - } - - static Json::Value toJson(const LocalisedTransaction& lt) { return toJson(*lt.trx, lt.trx_loc); } - - static Json::Value toJson(BlockHeader const& obj) { - Json::Value res(Json::objectValue); - res["parentHash"] = toJson(obj.parent_hash); - res["sha3Uncles"] = toJson(BlockHeader::uncles_hash()); - res["stateRoot"] = toJson(obj.state_root); - res["transactionsRoot"] = toJson(obj.transactions_root); - res["receiptsRoot"] = toJson(obj.receipts_root); - res["number"] = toJson(obj.number); - res["gasUsed"] = toJson(obj.gas_used); - res["gasLimit"] = toJson(obj.gas_limit); - res["extraData"] = toJson(obj.extra_data); - res["logsBloom"] = toJson(obj.log_bloom); - res["timestamp"] = toJson(obj.timestamp); - res["author"] = toJson(obj.author); - res["mixHash"] = toJson(BlockHeader::mix_hash()); - res["nonce"] = toJson(BlockHeader::nonce()); - res["uncles"] = Json::Value(Json::arrayValue); - res["hash"] = toJson(obj.hash); - res["difficulty"] = "0x0"; - res["totalDifficulty"] = "0x0"; - res["totalReward"] = toJson(obj.total_reward); - return res; - } - - static Json::Value toJson(LocalisedLogEntry const& lle) { - Json::Value res(Json::objectValue); - add(res, lle.trx_loc); - res["removed"] = false; - res["data"] = toJson(lle.le.data); - res["address"] = toJson(lle.le.address); - res["logIndex"] = toJson(lle.position_in_receipt); - auto& topics_json = res["topics"] = Json::Value(Json::arrayValue); - for (auto const& t : lle.le.topics) { - topics_json.append(toJson(t)); - } - return res; - } - - static Json::Value toJson(LocalisedTransactionReceipt const& ltr) { - Json::Value res(Json::objectValue); - add(res, ltr.trx_loc); - res["from"] = toJson(ltr.trx_from); - res["to"] = toJson(ltr.trx_to); - res["status"] = toJson(ltr.r.status_code); - res["gasUsed"] = toJson(ltr.r.gas_used); - res["cumulativeGasUsed"] = toJson(ltr.r.cumulative_gas_used); - res["contractAddress"] = toJson(ltr.r.new_contract_address); - res["logsBloom"] = toJson(ltr.r.bloom()); - auto& logs_json = res["logs"] = Json::Value(Json::arrayValue); - uint log_i = 0; - for (auto const& le : ltr.r.logs) { - logs_json.append(toJson(LocalisedLogEntry{le, ltr.trx_loc, log_i++})); - } - return res; - } - - static Json::Value toJson(SyncStatus const& obj) { - Json::Value res(Json::objectValue); - res["startingBlock"] = toJS(obj.starting_block); - res["currentBlock"] = toJS(obj.current_block); - res["highestBlock"] = toJS(obj.highest_block); - return res; - } - - template - static Json::Value toJson(T const& t) { - return toJS(t); - } - - template - static Json::Value toJsonArray(vector const& _es) { - Json::Value res(Json::arrayValue); - for (auto const& e : _es) { - res.append(toJson(e)); - } - return res; - } - - template - static Json::Value 
toJson(optional const& t) { - return t ? toJson(*t) : Json::Value(); - } }; -Json::Value toJson(BlockHeader const& obj) { return EthImpl::toJson(obj); } - shared_ptr NewEth(EthParams&& prerequisites) { return make_shared(std::move(prerequisites)); } } // namespace taraxa::net::rpc::eth \ No newline at end of file diff --git a/libraries/core_libs/network/rpc/eth/Eth.h b/libraries/core_libs/network/rpc/eth/Eth.h index 6a256cae83..acf361e0b7 100644 --- a/libraries/core_libs/network/rpc/eth/Eth.h +++ b/libraries/core_libs/network/rpc/eth/Eth.h @@ -1,18 +1,48 @@ #pragma once +#include "data.hpp" #include "final_chain/final_chain.hpp" #include "network/rpc/EthFace.h" #include "watches.hpp" namespace taraxa::net::rpc::eth { +void add(Json::Value& obj, const std::optional& info); +void add(Json::Value& obj, const ExtendedTransactionLocation& info); +Json::Value toJson(const final_chain::BlockHeader& obj); +Json::Value toJson(const Transaction& trx, const std::optional& loc); +Json::Value toJson(const LocalisedTransaction& lt); +Json::Value toJson(const final_chain::BlockHeader& obj); +Json::Value toJson(const LocalisedLogEntry& lle); +Json::Value toJson(const LocalisedTransactionReceipt& ltr); +Json::Value toJson(const SyncStatus& obj); + +template +Json::Value toJson(const T& t) { + return toJS(t); +} + +template +Json::Value toJsonArray(const std::vector& _es) { + Json::Value res(Json::arrayValue); + for (const auto& e : _es) { + res.append(toJson(e)); + } + return res; +} + +template +Json::Value toJson(const std::optional& t) { + return t ? toJson(*t) : Json::Value(); +} + struct EthParams { Address address; uint64_t chain_id = 0; uint64_t gas_limit = ((uint64_t)1 << 53) - 1; std::shared_ptr final_chain; - std::function(h256 const&)> get_trx; - std::function const& trx)> send_trx; + std::function(const h256&)> get_trx; + std::function& trx)> send_trx; std::function gas_pricer = [] { return u256(0); }; std::function()> syncing_probe = [] { return std::nullopt; }; WatchesConfig watches_cfg; @@ -29,13 +59,11 @@ struct Eth : virtual ::taraxa::net::EthFace { ::taraxa::net::EthFace::operator=(std::move(rhs)); return *this; } - virtual void note_block_executed(final_chain::BlockHeader const&, SharedTransactions const&, - final_chain::TransactionReceipts const&) = 0; - virtual void note_pending_transaction(h256 const& trx_hash) = 0; + virtual void note_block_executed(const final_chain::BlockHeader&, const SharedTransactions&, + const final_chain::TransactionReceipts&) = 0; + virtual void note_pending_transaction(const h256& trx_hash) = 0; }; std::shared_ptr NewEth(EthParams&&); -Json::Value toJson(final_chain::BlockHeader const& obj); - } // namespace taraxa::net::rpc::eth diff --git a/libraries/core_libs/network/rpc/eth/LogFilter.cpp b/libraries/core_libs/network/rpc/eth/LogFilter.cpp index 1158486a5a..3969a89ae0 100644 --- a/libraries/core_libs/network/rpc/eth/LogFilter.cpp +++ b/libraries/core_libs/network/rpc/eth/LogFilter.cpp @@ -8,19 +8,14 @@ LogFilter::LogFilter(EthBlockNumber from_block, std::optional to if (!addresses_.empty()) { return; } - for (auto const& t : topics_) { - if (!t.empty()) { - return; - } - } - is_range_only_ = true; + is_range_only_ = std::all_of(topics_.cbegin(), topics_.cend(), [](const auto& t) { return t.empty(); }); } std::vector LogFilter::bloomPossibilities() const { // return combination of each of the addresses/topics std::vector ret; // | every address with every topic - for (auto const& i : addresses_) { + for (const auto& i : addresses_) { // 1st case, there are 
addresses and topics // // m_addresses = [a0, a1]; @@ -31,12 +26,12 @@ std::vector LogFilter::bloomPossibilities() const { // a1 | t0, a1 | t1a | t1b // ] // - for (auto const& t : topics_) { + for (const auto& t : topics_) { if (t.empty()) { continue; } auto b = LogBloom().shiftBloom<3>(sha3(i)); - for (auto const& j : t) { + for (const auto& j : t) { b = b.shiftBloom<3>(sha3(j)); } ret.push_back(b); @@ -51,9 +46,8 @@ std::vector LogFilter::bloomPossibilities() const { // blooms = [a0, a1]; // if (ret.empty()) { - for (auto const& i : addresses_) { - ret.push_back(LogBloom().shiftBloom<3>(sha3(i))); - } + std::transform(addresses_.cbegin(), addresses_.cend(), std::back_inserter(ret), + [](const auto& i) { return LogBloom().shiftBloom<3>(sha3(i)); }); } // 3rd case, there are no addresses, at least create blooms from topics @@ -64,10 +58,10 @@ std::vector LogFilter::bloomPossibilities() const { // blooms = [t0, t1a | t1b]; // if (addresses_.empty()) { - for (auto const& t : topics_) { + for (const auto& t : topics_) { if (t.size()) { LogBloom b; - for (auto const& j : t) { + for (const auto& j : t) { b = b.shiftBloom<3>(sha3(j)); } ret.push_back(b); @@ -80,7 +74,7 @@ std::vector LogFilter::bloomPossibilities() const { bool LogFilter::matches(LogBloom b) const { if (!addresses_.empty()) { auto ok = false; - for (auto const& i : addresses_) { + for (const auto& i : addresses_) { if (b.containsBloom<3>(sha3(i))) { ok = true; break; @@ -90,12 +84,12 @@ bool LogFilter::matches(LogBloom b) const { return false; } } - for (auto const& t : topics_) { + for (const auto& t : topics_) { if (t.empty()) { continue; } auto ok = false; - for (auto const& i : t) { + for (const auto& i : t) { if (b.containsBloom<3>(sha3(i))) { ok = true; break; @@ -108,12 +102,12 @@ bool LogFilter::matches(LogBloom b) const { return true; } -void LogFilter::match_one(TransactionReceipt const& r, std::function const& cb) const { +void LogFilter::match_one(const TransactionReceipt& r, const std::function& cb) const { if (!matches(r.bloom())) { return; } for (size_t log_i = 0; log_i < r.logs.size(); ++log_i) { - auto const& e = r.logs[log_i]; + const auto& e = r.logs[log_i]; if (!addresses_.empty() && !addresses_.count(e.address)) { continue; } @@ -134,8 +128,8 @@ bool LogFilter::blk_number_matches(EthBlockNumber blk_n) const { return from_block_ <= blk_n && (!to_block_ || blk_n <= *to_block_); } -void LogFilter::match_one(ExtendedTransactionLocation const& trx_loc, TransactionReceipt const& r, - std::function const& cb) const { +void LogFilter::match_one(const ExtendedTransactionLocation& trx_loc, const TransactionReceipt& r, + const std::function& cb) const { if (!blk_number_matches(trx_loc.blk_n)) { return; } @@ -149,15 +143,14 @@ void LogFilter::match_one(ExtendedTransactionLocation const& trx_loc, Transactio } } -std::vector LogFilter::match_all(FinalChain const& final_chain) const { +std::vector LogFilter::match_all(const FinalChain& final_chain) const { std::vector ret; auto action = [&, this](EthBlockNumber blk_n) { ExtendedTransactionLocation trx_loc{{{blk_n}, *final_chain.block_hash(blk_n)}}; auto hashes = final_chain.transaction_hashes(trx_loc.blk_n); - for (size_t i = 0; i < hashes->count(); ++i) { - trx_loc.trx_hash = hashes->get(i); - match_one(trx_loc, *final_chain.transaction_receipt(trx_loc.trx_hash), - [&](auto const& lle) { ret.push_back(lle); }); + for (const auto& hash : *hashes) { + trx_loc.trx_hash = hash; + match_one(trx_loc, *final_chain.transaction_receipt(hash), [&](const auto& lle) { 
ret.push_back(lle); }); ++trx_loc.index; } }; @@ -169,7 +162,7 @@ std::vector LogFilter::match_all(FinalChain const& final_chai return ret; } std::set matchingBlocks; - for (auto const& bloom : bloomPossibilities()) { + for (const auto& bloom : bloomPossibilities()) { for (auto blk_n : final_chain.withBlockBloom(bloom, from_block_, to_blk_n)) { matchingBlocks.insert(blk_n); } diff --git a/libraries/core_libs/network/rpc/jsonrpc_ws_server.cpp b/libraries/core_libs/network/rpc/jsonrpc_ws_server.cpp index 50de2ee673..269d196ba6 100644 --- a/libraries/core_libs/network/rpc/jsonrpc_ws_server.cpp +++ b/libraries/core_libs/network/rpc/jsonrpc_ws_server.cpp @@ -56,8 +56,8 @@ std::string JsonRpcWsSession::processRequest(const std::string_view &request) { auto handler = ws_server->GetHandler(); if (handler != NULL) { try { - LOG(log_tr_) << "WS Read: " << (char *)buffer_.data().data(); - handler->HandleRequest((char *)buffer_.data().data(), response); + LOG(log_tr_) << "WS Read: " << static_cast(buffer_.data().data()); + handler->HandleRequest(static_cast(buffer_.data().data()), response); } catch (std::exception const &e) { LOG(log_er_) << "Exception " << e.what(); auto &res_json_error = json_response["error"] = Json::Value(Json::objectValue); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp index 5ddd8bacab..d8862d06df 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp @@ -30,7 +30,7 @@ bool ExtVotesPacketHandler::processVote(const std::shared_ptr &vote, const return false; } - // Validate vote's period, roun and step min/max values + // Validate vote's period, round and step min/max values if (const auto vote_valid = validateVotePeriodRoundStep(vote, peer, validate_max_round_step); !vote_valid.first) { LOG(log_wr_) << "Vote period/round/step " << vote->getHash() << " validation failed. 
Err: " << vote_valid.second; return false; @@ -85,7 +85,8 @@ std::pair ExtVotesPacketHandler::validateVotePeriodRoundStep( // skip this check if kConf.network.ddos_protection.vote_accepting_periods == 0 // vote->getPeriod() - 1 is here because votes are validated against vote_period - 1 in dpos contract // Do not request round sync too often here - if (std::chrono::system_clock::now() - last_pbft_block_sync_request_time_ > kSyncRequestInterval) { + if (vote->getVoter() == peer->getId() && + std::chrono::system_clock::now() - last_pbft_block_sync_request_time_ > kSyncRequestInterval) { // request PBFT chain sync from this node sealAndSend(peer->getId(), SubprotocolPacketType::GetPbftSyncPacket, std::move(dev::RLPStream(1) << std::max(vote->getPeriod() - 1, peer->pbft_chain_size_.load()))); @@ -113,7 +114,8 @@ std::pair ExtVotesPacketHandler::validateVotePeriodRoundStep( // Trigger votes(round) syncing only if we are in sync in terms of period if (current_pbft_period == vote->getPeriod()) { // Do not request round sync too often here - if (std::chrono::system_clock::now() - last_votes_sync_request_time_ > kSyncRequestInterval) { + if (vote->getVoter() == peer->getId() && + std::chrono::system_clock::now() - last_votes_sync_request_time_ > kSyncRequestInterval) { // request round votes sync from this node requestPbftNextVotesAtPeriodRound(peer->getId(), current_pbft_period, current_pbft_round); last_votes_sync_request_time_ = std::chrono::system_clock::now(); @@ -140,21 +142,6 @@ std::pair ExtVotesPacketHandler::validateVotePeriodRoundStep( return {true, ""}; } -std::pair ExtVotesPacketHandler::validateVote(const std::shared_ptr &vote) const { - // Check is vote is unique per period, round & step & voter -> each address can generate just 1 vote - // (for a value that isn't NBH) per period, round & step - if (auto unique_vote_validation = vote_mgr_->isUniqueVote(vote); !unique_vote_validation.first) { - return unique_vote_validation; - } - - const auto vote_valid = vote_mgr_->validateVote(vote); - if (!vote_valid.first) { - LOG(log_er_) << "Vote \"dpos\" validation failed: " << vote_valid.second; - } - - return vote_valid; -} - bool ExtVotesPacketHandler::validateVoteAndBlock(const std::shared_ptr &vote, const std::shared_ptr &pbft_block) const { if (pbft_block->getBlockHash() != vote->getBlockHash()) { @@ -162,33 +149,21 @@ bool ExtVotesPacketHandler::validateVoteAndBlock(const std::shared_ptr &vo << pbft_block->getBlockHash(); return false; } - // TODO[2401]: move this check to PBFT block - std::unordered_set set; - const auto reward_votes = pbft_block->getRewardVotes(); - set.reserve(reward_votes.size()); - for (const auto &hash : reward_votes) { - if (!set.insert(hash).second) { - LOG(log_er_) << "PBFT block " << pbft_block->getBlockHash() << " proposed by " << pbft_block->getBeneficiary() - << " has duplicated vote " << hash; - return false; - } - } - return true; } bool ExtVotesPacketHandler::isPbftRelevantVote(const std::shared_ptr &vote) const { const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); - // Previous round next vote - if (vote->getPeriod() == current_pbft_period && (current_pbft_round - 1) == vote->getRound() && - vote->getType() == PbftVoteTypes::next_vote) { + if (vote->getPeriod() >= current_pbft_period && vote->getRound() >= current_pbft_round) { + // Standard current or future vote return true; - } else if (vote->getPeriod() >= current_pbft_period) { - // Standard vote + } else if (vote->getPeriod() == current_pbft_period && 
vote->getRound() == (current_pbft_round - 1) && + vote->getType() == PbftVoteTypes::next_vote) { + // Previous round next vote return true; } else if (vote->getPeriod() == current_pbft_period - 1 && vote->getType() == PbftVoteTypes::cert_vote) { - // Previous round cert vote - potential reward vote + // Previous period cert vote - potential reward vote return true; } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/dag_block_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/dag_block_packet_handler.cpp index 24f984aaf1..d71dfbfe15 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/dag_block_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/dag_block_packet_handler.cpp @@ -105,7 +105,7 @@ void DagBlockPacketHandler::onNewBlockReceived(DagBlock &&block, const std::shar case DagManager::VerifyBlockReturnType::NotEligible: case DagManager::VerifyBlockReturnType::FailedTipsVerification: { std::ostringstream err_msg; - err_msg << "DagBlock" << block_hash << " failed verification with error code " + err_msg << "DagBlock " << block_hash << " failed verification with error code " << static_cast(verified); throw MaliciousPeerException(err_msg.str()); } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/dag_sync_packet_handler.cpp index ed1406e50f..7d1b04acc7 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/dag_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/dag_sync_packet_handler.cpp @@ -3,6 +3,7 @@ #include "dag/dag.hpp" #include "network/tarcap/packets_handlers/common/ext_syncing_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" +#include "transaction/transaction.hpp" #include "transaction/transaction_manager.hpp" namespace taraxa::network::tarcap { @@ -61,7 +62,7 @@ void DagSyncPacketHandler::process(const PacketData& packet_data, const std::sha auto trx = std::make_shared(tx_rlp); peer->markTransactionAsKnown(trx->getHash()); transactions.emplace_back(std::move(trx)); - } catch (const Transaction::InvalidSignature& e) { + } catch (const Transaction::InvalidTransaction& e) { throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); } } @@ -119,7 +120,7 @@ void DagSyncPacketHandler::process(const PacketData& packet_data, const std::sha const auto verified = dag_mgr_->verifyBlock(block); if (verified != DagManager::VerifyBlockReturnType::Verified) { std::ostringstream err_msg; - err_msg << "DagBlock" << block.getHash() << " failed verification with error code " + err_msg << "DagBlock " << block.getHash() << " failed verification with error code " << static_cast(verified); throw MaliciousPeerException(err_msg.str()); } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/get_next_votes_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/get_next_votes_sync_packet_handler.cpp index 6a6baa27ca..3b9ddb71c3 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/get_next_votes_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/get_next_votes_sync_packet_handler.cpp @@ -33,8 +33,7 @@ void GetNextVotesSyncPacketHandler::process(const PacketData &packet_data, const return; } - std::vector> next_votes = - vote_mgr_->getAllTwoTPlusOneNextVotes(pbft_period, pbft_round - 1, peer); + std::vector> 
next_votes = vote_mgr_->getAllTwoTPlusOneNextVotes(pbft_period, pbft_round - 1); // In edge case this could theoretically happen due to race condition when we moved to the next period or round // right before calling getAllTwoTPlusOneNextVotes with specific period & round if (next_votes.empty()) { @@ -52,13 +51,27 @@ void GetNextVotesSyncPacketHandler::process(const PacketData &packet_data, const return; } - next_votes = vote_mgr_->getAllTwoTPlusOneNextVotes(tmp_pbft_period, tmp_pbft_round - 1, peer); + next_votes = vote_mgr_->getAllTwoTPlusOneNextVotes(tmp_pbft_period, tmp_pbft_round - 1); if (next_votes.empty()) { LOG(log_er_) << "No next votes returned for period " << tmp_pbft_period << ", round " << tmp_pbft_round - 1; return; } } + std::vector<std::shared_ptr<Vote>> next_votes_to_send; + next_votes_to_send.reserve(next_votes.size()); + for (const auto &vote : next_votes) { + if (!peer->isVoteKnown(vote->getHash())) { + next_votes_to_send.emplace_back(vote); + } + } + + if (next_votes_to_send.empty()) { + LOG(log_dg_) << "Votes already gossiped, no need to send votes sync packet for " << pbft_period << ", round " << pbft_round - 1; + return; + } + LOG(log_nf_) << "Next votes sync packet with " << next_votes_to_send.size() << " votes sent to " << peer->getId(); sendPbftVotesBundle(peer, std::move(next_votes_to_send)); } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/get_pbft_sync_packet_handler.cpp index 2a0de430d9..7d569f08ca 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/get_pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/get_pbft_sync_packet_handler.cpp @@ -4,17 +4,20 @@ #include "pbft/pbft_chain.hpp" #include "storage/storage.hpp" #include "vote/vote.hpp" +#include "vote_manager/vote_manager.hpp" namespace taraxa::network::tarcap { GetPbftSyncPacketHandler::GetPbftSyncPacketHandler(const FullNodeConfig &conf, std::shared_ptr<PeersState> peers_state, std::shared_ptr<TimePeriodPacketsStats> packets_stats, std::shared_ptr<PbftSyncingState> pbft_syncing_state, - std::shared_ptr<PbftChain> pbft_chain, std::shared_ptr<DbStorage> db, + std::shared_ptr<PbftChain> pbft_chain, + std::shared_ptr<VoteManager> vote_mgr, std::shared_ptr<DbStorage> db, const addr_t &node_addr) : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, "GET_PBFT_SYNC_PH"), pbft_syncing_state_(std::move(pbft_syncing_state)), pbft_chain_(std::move(pbft_chain)), + vote_mgr_(std::move(vote_mgr)), db_(std::move(db)) {} void GetPbftSyncPacketHandler::validatePacketRlpFormat(const PacketData &packet_data) const { @@ -56,12 +59,13 @@ void GetPbftSyncPacketHandler::process(const PacketData &packet_data, } LOG(log_tr_) << "Will send " << blocks_to_transfer << " PBFT blocks to " << packet_data.from_node_id_; - sendPbftBlocks(packet_data.from_node_id_, height_to_sync, blocks_to_transfer, pbft_chain_synced); + sendPbftBlocks(peer, height_to_sync, blocks_to_transfer, pbft_chain_synced); } // api for pbft syncing -void GetPbftSyncPacketHandler::sendPbftBlocks(dev::p2p::NodeID const &peer_id, PbftPeriod from_period, +void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr<TaraxaPeer> &peer, PbftPeriod from_period, size_t blocks_to_transfer, bool pbft_chain_synced) { + const auto &peer_id = peer->getId(); LOG(log_tr_) << "sendPbftBlocks: peer want to sync from pbft chain height " << from_period << ", will send at most " << blocks_to_transfer << " pbft blocks to " << peer_id; @@ -80,7 +84,7 @@ void GetPbftSyncPacketHandler::sendPbftBlocks(dev::p2p::NodeID const &peer_id, P s << 
last_block; s.appendRaw(data); // Latest finalized block cert votes are saved in db as reward votes for new blocks - const auto votes = db_->getRewardVotes(); + const auto votes = vote_mgr_->getRewardVotes(); s.appendList(votes.size()); for (const auto &vote : votes) { s.appendRaw(vote->rlp(true)); @@ -92,6 +96,9 @@ void GetPbftSyncPacketHandler::sendPbftBlocks(dev::p2p::NodeID const &peer_id, P } LOG(log_dg_) << "Sending PbftSyncPacket period " << block_period << " to " << peer_id; sealAndSend(peer_id, SubprotocolPacketType::PbftSyncPacket, std::move(s)); + if (pbft_chain_synced && last_block) { + peer->syncing_ = false; + } } } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/pbft_sync_packet_handler.cpp index 01ba610915..ed574aa4fe 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/pbft_sync_packet_handler.cpp @@ -60,7 +60,7 @@ void PbftSyncPacketHandler::process(const PacketData &packet_data, const std::sh PeriodData period_data; try { period_data = PeriodData(packet_data.rlp_[1]); - } catch (const Transaction::InvalidSignature &e) { + } catch (const std::runtime_error &e) { throw MaliciousPeerException("Unable to parse PeriodData: " + std::string(e.what())); } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp index 0b7d5387fc..6132d0091c 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp @@ -3,6 +3,7 @@ #include #include "network/tarcap/shared_states/test_state.hpp" +#include "transaction/transaction.hpp" #include "transaction/transaction_manager.hpp" namespace taraxa::network::tarcap { @@ -61,7 +62,7 @@ inline void TransactionPacketHandler::process(const PacketData &packet_data, con try { transaction = std::make_shared(packet_data.rlp_[1][tx_idx].data().toBytes()); received_transactions.emplace_back(trx_hash); - } catch (const Transaction::InvalidSignature &e) { + } catch (const Transaction::InvalidTransaction &e) { throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); } @@ -126,11 +127,7 @@ void TransactionPacketHandler::periodicSendTransactions(SharedTransactions &&tra std::vector> peers_with_transactions_to_send; auto peers = peers_state_->getAllPeers(); - std::string transactions_to_log; std::string peers_to_log; - for (auto const &trx : transactions) { - transactions_to_log += trx->getHash().abridged(); - } for (const auto &peer : peers) { // Confirm that status messages were exchanged otherwise message might be ignored and node would // incorrectly markTransactionAsKnown @@ -149,6 +146,9 @@ void TransactionPacketHandler::periodicSendTransactions(SharedTransactions &&tra } const auto peers_to_send_count = peers_with_transactions_to_send.size(); if (peers_to_send_count > 0) { + auto transactions_to_log = + std::accumulate(transactions.begin(), transactions.end(), std::string{}, + [](const auto &r, const auto &trx) { return r + trx->getHash().abridged(); }); LOG(log_tr_) << "Sending Transactions " << transactions_to_log << " to " << peers_to_log; // Sending it in same order favours some peers over others, always start with a different position uint32_t 
start_with = rand() % peers_to_send_count; @@ -159,8 +159,9 @@ void TransactionPacketHandler::periodicSendTransactions(SharedTransactions &&tra } } -void TransactionPacketHandler::sendTransactions(std::shared_ptr const &peer, +void TransactionPacketHandler::sendTransactions(std::shared_ptr peer, std::vector> &&transactions) { + if (!peer) return; const auto peer_id = peer->getId(); LOG(log_tr_) << "sendTransactions " << transactions.size() << " to " << peer_id; diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp index bf494f7e32..5810b0518a 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp @@ -29,13 +29,22 @@ void VotePacketHandler::process(const PacketData &packet_data, const std::shared std::shared_ptr vote = std::make_shared(packet_data.rlp_[0]); if (const size_t item_count = packet_data.rlp_.itemCount(); item_count == kExtendedVotePacketSize) { - pbft_block = std::make_shared(packet_data.rlp_[1]); + try { + pbft_block = std::make_shared(packet_data.rlp_[1]); + } catch (const std::exception &e) { + throw MaliciousPeerException(e.what()); + } peer_chain_size = packet_data.rlp_[2].toInt(); LOG(log_dg_) << "Received PBFT vote " << vote->getHash() << " with PBFT block " << pbft_block->getBlockHash(); } else { LOG(log_dg_) << "Received PBFT vote " << vote->getHash(); } + // Update peer's max chain size + if (peer_chain_size.has_value() && *peer_chain_size > peer->pbft_chain_size_) { + peer->pbft_chain_size_ = *peer_chain_size; + } + const auto vote_hash = vote->getHash(); if (!isPbftRelevantVote(vote)) { @@ -63,26 +72,24 @@ void VotePacketHandler::process(const PacketData &packet_data, const std::shared peer->markPbftBlockAsKnown(pbft_block->getBlockHash()); } - processVote(vote, pbft_block, peer, true); + if (!processVote(vote, pbft_block, peer, true)) { + return; + } // Do not mark it before, as peers have small caches of known votes. 
Only mark gossiping votes peer->markVoteAsKnown(vote_hash); onNewPbftVote(vote, pbft_block); - - // Update peer's max chain size - if (peer_chain_size.has_value() && *peer_chain_size > peer->pbft_chain_size_) { - peer->pbft_chain_size_ = *peer_chain_size; - } } -void VotePacketHandler::onNewPbftVote(const std::shared_ptr &vote, const std::shared_ptr &block) { +void VotePacketHandler::onNewPbftVote(const std::shared_ptr &vote, const std::shared_ptr &block, + bool rebroadcast) { for (const auto &peer : peers_state_->getAllPeers()) { if (peer.second->syncing_) { LOG(log_dg_) << " PBFT vote " << vote->getHash() << " not sent to " << peer.first << " peer syncing"; continue; } - if (peer.second->isVoteKnown(vote->getHash())) { + if (!rebroadcast && peer.second->isVoteKnown(vote->getHash())) { continue; } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp index bdd4d32660..3f38145c36 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp @@ -113,9 +113,15 @@ void VotesSyncPacketHandler::process(const PacketData &packet_data, const std::s // Process processStandardVote is called with false in case of next votes bundle -> does not check max boundaries // for round and step to actually being able to sync the current round in case network is stalled - bool check_max_round_step = votes_bundle_votes_type == PbftVoteTypes::next_vote ? false : true; - if (votes_bundle_votes_type == PbftVoteTypes::cert_vote) check_max_round_step = false; - processVote(vote, nullptr, peer, check_max_round_step); + bool check_max_round_step = true; + if (votes_bundle_votes_type == PbftVoteTypes::cert_vote || votes_bundle_votes_type == PbftVoteTypes::next_vote) { + check_max_round_step = false; + } + + if (!processVote(vote, nullptr, peer, check_max_round_step)) { + continue; + } + votes.push_back(std::move(vote)); } diff --git a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp index c031e3114e..e7d69fb153 100644 --- a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp +++ b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp @@ -68,7 +68,7 @@ void TaraxaCapability::init(const h256 &genesis_hash, std::shared_ptr node_addr); // Inits periodic events. Must be called after registerHandlers !!! 
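// A minimal sketch (not part of this diff) of how the new `rebroadcast` flag on
// onNewPbftVote() above is meant to be driven now that the 60s own-votes job is
// removed from initPeriodicEvents() below; the PbftManager call site here is an
// assumption, only getOwnVerifiedVotes()/getPbftProposedBlock() come from this diff:
//
//   // hypothetical re-gossip, e.g. triggered by PbftManager when a round stalls:
//   for (const auto &vote : db->getOwnVerifiedVotes()) {
//     vote_packet_handler->onNewPbftVote(
//         vote, pbft_mgr->getPbftProposedBlock(vote->getPeriod(), vote->getBlockHash()),
//         /*rebroadcast=*/true);  // force-send even to peers that already know the vote
//   }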
- initPeriodicEvents(pbft_mgr, db, trx_mgr, all_packets_stats_); + initPeriodicEvents(pbft_mgr, trx_mgr, all_packets_stats_); } void TaraxaCapability::addBootNodes(bool initial) { @@ -105,6 +105,11 @@ void TaraxaCapability::addBootNodes(bool initial) { continue; } + if (host->nodeTableHasNode(pub)) { + LOG(log_dg_) << "skipping node " << node.id << " already in table"; + continue; + } + auto ip = resolveHost(node.ip, node.port); LOG(log_nf_) << "Adding boot node:" << node.ip << ":" << node.port << " " << ip.second.address().to_string(); dev::p2p::Node boot_node(pub, dev::p2p::NodeIPEndpoint(ip.second.address(), node.port, node.port), @@ -117,7 +122,6 @@ void TaraxaCapability::addBootNodes(bool initial) { } void TaraxaCapability::initPeriodicEvents(const std::shared_ptr &pbft_mgr, - const std::shared_ptr &db, std::shared_ptr trx_mgr, std::shared_ptr packets_stats) { // TODO: refactor this: @@ -173,30 +177,6 @@ void TaraxaCapability::initPeriodicEvents(const std::shared_ptr &pb addBootNodes(); } }); - - // If period and round did not change after 60 seconds from node start, rebroadcast own pbft votes - if (pbft_mgr && db /* just because of tests */) { - auto vote_packet_handler = packets_handlers_->getSpecificHandler(); - const auto [init_round, init_period] = pbft_mgr->getPbftRoundAndPeriod(); - periodic_events_tp_->post(60000, [init_round = init_round, init_period = init_period, db = db, pbft_mgr = pbft_mgr, - vote_packet_handler = std::move(vote_packet_handler)] { - const auto [curent_round, curent_period] = pbft_mgr->getPbftRoundAndPeriod(); - if (curent_period != init_period || curent_round != init_round) { - return; - } - - const auto own_votes = db->getOwnVerifiedVotes(); - if (own_votes.empty()) { - return; - } - - // Send votes by one as votes sync packet must contain votes with the same type, period and round - for (const auto &vote : own_votes) { - vote_packet_handler->onNewPbftVote(vote, - pbft_mgr->getPbftProposedBlock(vote->getPeriod(), vote->getBlockHash())); - } - }); - } } void TaraxaCapability::registerPacketHandlers( @@ -237,7 +217,7 @@ void TaraxaCapability::registerPacketHandlers( // TODO there is additional logic, that should be moved outside process function packets_handlers_->registerHandler(kConf, peers_state_, packets_stats, pbft_syncing_state_, - pbft_chain, db, node_addr); + pbft_chain, vote_mgr, db, node_addr); packets_handlers_->registerHandler(kConf, peers_state_, packets_stats, pbft_syncing_state_, pbft_chain, pbft_mgr, dag_mgr, vote_mgr, diff --git a/libraries/core_libs/network/src/tarcap/threadpool/priority_queue.cpp b/libraries/core_libs/network/src/tarcap/threadpool/priority_queue.cpp index 67a4259c65..220a0131f8 100644 --- a/libraries/core_libs/network/src/tarcap/threadpool/priority_queue.cpp +++ b/libraries/core_libs/network/src/tarcap/threadpool/priority_queue.cpp @@ -114,13 +114,7 @@ std::optional PriorityQueue::pop() { } bool PriorityQueue::empty() const { - for (const auto& queue : packets_queues_) { - if (!queue.empty()) { - return false; - } - } - - return true; + return std::all_of(packets_queues_.cbegin(), packets_queues_.cend(), [](const auto& queue) { return queue.empty(); }); } void PriorityQueue::updateDependenciesStart(const PacketData& packet) { diff --git a/libraries/core_libs/network/src/ws_server.cpp b/libraries/core_libs/network/src/ws_server.cpp index 2f85fb1c1e..03f0bc7da5 100644 --- a/libraries/core_libs/network/src/ws_server.cpp +++ b/libraries/core_libs/network/src/ws_server.cpp @@ -51,7 +51,7 @@ void 
WsSession::on_read(beast::error_code ec, std::size_t bytes_transferred) { return close(is_normal(ec)); } - LOG(log_tr_) << "WS READ " << ((char *)buffer_.data().data()); + LOG(log_tr_) << "WS READ " << (static_cast(buffer_.data().data())); const std::string_view str_view(static_cast(buffer_.data().data()), buffer_.size()); const auto response = processRequest(str_view); @@ -89,12 +89,13 @@ void WsSession::on_write_no_read(beast::error_code ec, std::size_t bytes_transfe } } -void WsSession::newEthBlock(::taraxa::final_chain::BlockHeader const &payload) { +void WsSession::newEthBlock(const ::taraxa::final_chain::BlockHeader &payload, const TransactionHashes &trx_hashes) { if (new_heads_subscription_ != 0) { Json::Value res, params; res["jsonrpc"] = "2.0"; res["method"] = "eth_subscription"; params["result"] = rpc::eth::toJson(payload); + params["result"]["transactions"] = rpc::eth::toJsonArray(trx_hashes); params["subscription"] = dev::toJS(new_heads_subscription_); res["params"] = params; auto response = util::to_string(res); @@ -318,10 +319,10 @@ void WsServer::newPbftBlockExecuted(PbftBlock const &pbft_blk, } } -void WsServer::newEthBlock(::taraxa::final_chain::BlockHeader const &payload) { +void WsServer::newEthBlock(const ::taraxa::final_chain::BlockHeader &payload, const TransactionHashes &trx_hashes) { boost::shared_lock lock(sessions_mtx_); for (auto const &session : sessions) { - if (!session->is_closed()) session->newEthBlock(payload); + if (!session->is_closed()) session->newEthBlock(payload, trx_hashes); } } diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index 09f8452c33..2e6002d89e 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -26,6 +26,7 @@ #include "network/rpc/jsonrpc_http_processor.hpp" #include "network/rpc/jsonrpc_ws_server.hpp" #include "pbft/pbft_manager.hpp" +#include "storage/migration/migration_manager.hpp" #include "transaction/gas_pricer.hpp" #include "transaction/transaction_manager.hpp" @@ -62,14 +63,12 @@ void FullNode::init() { conf_.db_config.db_max_open_files, conf_.db_config.db_max_snapshots, conf_.db_config.db_revert_to_period, node_addr, true); } - db_ = std::make_shared(conf_.db_path, conf_.db_config.db_snapshot_each_n_pbft_block, conf_.db_config.db_max_open_files, conf_.db_config.db_max_snapshots, - conf_.db_config.db_revert_to_period, node_addr, false, - conf_.db_config.rebuild_db_columns); + conf_.db_config.db_revert_to_period, node_addr, false); - if (db_->hasMinorVersionChanged()) { - LOG(log_si_) << "Minor DB version has changed. Rebuilding Db"; + if (db_->hasMajorVersionChanged()) { + LOG(log_si_) << "Major DB version has changed. 
Rebuilding Db"; conf_.db_config.rebuild_db = true; db_ = nullptr; old_db_ = std::make_shared(conf_.db_path, conf_.db_config.db_snapshot_each_n_pbft_block, @@ -78,7 +77,11 @@ void FullNode::init() { db_ = std::make_shared(conf_.db_path, conf_.db_config.db_snapshot_each_n_pbft_block, conf_.db_config.db_max_open_files, conf_.db_config.db_max_snapshots, conf_.db_config.db_revert_to_period, node_addr); + } else if (db_->hasMinorVersionChanged()) { + storage::migration::Manager(db_).applyAll(); } + db_->updateDbVersions(); + if (db_->getDagBlocksCount() == 0) { db_->setGenesisHash(conf_.genesis.genesisHash()); } @@ -234,7 +237,7 @@ void FullNode::start() { _eth_json_rpc->note_block_executed(*res->final_chain_blk, res->trxs, res->trx_receipts); } if (auto _ws = ws.lock()) { - _ws->newEthBlock(*res->final_chain_blk); + _ws->newEthBlock(*res->final_chain_blk, hashes_from_transactions(res->trxs)); if (auto _db = db.lock()) { auto pbft_blk = _db->getPbftBlock(res->hash); if (const auto &hash = pbft_blk->getPivotDagBlockHash(); hash != kNullBlockHash) { @@ -302,30 +305,6 @@ void FullNode::start() { }, subscription_pool_); - // Subscription to process hardforks - // final_chain_->block_applying_.subscribe([&](uint64_t block_num) { - // // TODO: should have only common hardfork code calling hardfork executor - // auto &state_conf = conf_.genesis.state; - // if (state_conf.hardforks.fix_genesis_fork_block == block_num) { - // for (auto &e : state_conf.dpos->genesis_state) { - // for (auto &b : e.second) { - // b.second *= kOneTara; - // } - // } - // for (auto &b : state_conf.initial_balances) { - // b.second *= kOneTara; - // } - // // we are multiplying it by TARA precision - // state_conf.dpos->eligibility_balance_threshold *= kOneTara; - // // amount of stake per vote should be 10 times smaller than eligibility threshold - // state_conf.dpos->vote_eligibility_balance_step.assign(state_conf.dpos->eligibility_balance_threshold); - // state_conf.dpos->eligibility_balance_threshold *= 10; - // // if this part of code will be needed we need to overwrite genesis json here - // // conf_.overwrite_chain_config_in_file(); - // final_chain_->update_state_config(state_conf); - // } - // }); - vote_mgr_->setNetwork(network_); pbft_mgr_->setNetwork(network_); dag_mgr_->setNetwork(network_); @@ -371,8 +350,8 @@ void FullNode::rebuildDb() { // Read pbft blocks one by one PbftPeriod period = 1; std::shared_ptr period_data, next_period_data; - std::vector> cert_votes; while (true) { + std::vector> cert_votes; if (next_period_data != nullptr) { period_data = next_period_data; } else { @@ -383,8 +362,11 @@ void FullNode::rebuildDb() { auto data = old_db_->getPeriodDataRaw(period + 1); if (data.size() == 0) { next_period_data = nullptr; - // Latest finalized block cert votes are saved in db as reward votes for new blocks - cert_votes = old_db_->getRewardVotes(); + // Latest finalized block cert votes are saved in db as 2t+1 cert votes + auto votes = old_db_->getAllTwoTPlusOneVotes(); + for (auto v : votes) { + if (v->getType() == PbftVoteTypes::cert_vote) cert_votes.push_back(v); + } } else { next_period_data = std::make_shared(std::move(data)); cert_votes = next_period_data->previous_block_cert_votes; diff --git a/libraries/core_libs/storage/include/storage/migration/migration_base.hpp b/libraries/core_libs/storage/include/storage/migration/migration_base.hpp new file mode 100644 index 0000000000..6ea3f15dd7 --- /dev/null +++ b/libraries/core_libs/storage/include/storage/migration/migration_base.hpp @@ -0,0 +1,36 @@ 
+#pragma once +#include "logger/logger.hpp" +#include "storage/storage.hpp" + +namespace taraxa::storage::migration { +class Base { + public: + Base(std::shared_ptr<DB> db) : db_(std::move(db)), batch_(db_->createWriteBatch()) {} + virtual ~Base() = default; + virtual std::string id() = 0; + // We need to specify the version here so that, in case of a major version change (db reindex), we won't apply unneeded migrations + virtual uint32_t dbVersion() = 0; + + bool isApplied() { return db_->lookup_int<bool>(id(), DB::Columns::migrations).has_value(); } + void apply(logger::Logger& log) { + if (db_->getMajorVersion() != dbVersion()) { + LOG(log) << id() + << ": skipping migration as it was made for a different major db version; it could be removed from the code" + << std::endl; + return; + } + migrate(); + setApplied(); + db_->commitWriteBatch(batch_); + } + + protected: + // Method with the migration-specific logic. All db changes should be made using `batch_` + virtual void migrate() = 0; + + void setApplied() { db_->insert(batch_, DB::Columns::migrations, id(), true); } + + std::shared_ptr<DB> db_; + DB::Batch batch_; +}; +} // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/include/storage/migration/migration_manager.hpp b/libraries/core_libs/storage/include/storage/migration/migration_manager.hpp new file mode 100644 index 0000000000..1a4500e4da --- /dev/null +++ b/libraries/core_libs/storage/include/storage/migration/migration_manager.hpp @@ -0,0 +1,19 @@ +#pragma once +#include "storage/migration/migration_base.hpp" + +namespace taraxa::storage::migration { +class Manager { + public: + explicit Manager(std::shared_ptr<DB> db, const addr_t& node_addr = {}); + template <typename T> + void registerMigration() { + migrations_.push_back(std::make_shared<T>(db_)); + } + void applyAll(); + + private: + std::shared_ptr<DB> db_; + std::vector<std::shared_ptr<Base>> migrations_; + LOG_OBJECTS_DEFINE +}; +} // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/include/storage/migration/transaction_hashes.hpp b/libraries/core_libs/storage/include/storage/migration/transaction_hashes.hpp new file mode 100644 index 0000000000..ab0876a987 --- /dev/null +++ b/libraries/core_libs/storage/include/storage/migration/transaction_hashes.hpp @@ -0,0 +1,16 @@ +#pragma once +#include + +#include "final_chain/final_chain.hpp" +#include "storage/migration/migration_base.hpp" +#include "transaction/transaction.hpp" + +namespace taraxa::storage::migration { +class TransactionHashes : public migration::Base { + public: + TransactionHashes(std::shared_ptr<DB> db); + std::string id() override; + uint32_t dbVersion() override; + void migrate() override; +}; +} // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index 5f3ae3fbd1..f8e8bdd47a 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -90,6 +90,8 @@ class DbStorage : public std::enable_shared_from_this<DbStorage> { // do not change/move COLUMN(default_column); + // migrations + COLUMN(migrations); // Contains full data for an executed PBFT block including PBFT block, cert votes, dag blocks and transactions COLUMN_W_COMP(period_data, getIntComparator<PbftPeriod>()); COLUMN(genesis); @@ -105,16 +107,13 @@ class DbStorage : public std::enable_shared_from_this<DbStorage> { COLUMN(pbft_head); COLUMN(latest_round_own_votes); // own votes of any type for the latest 
round COLUMN(latest_round_two_t_plus_one_votes); // 2t+1 votes bundles of any type for the latest round - COLUMN(latest_reward_votes); // extra reward votes on top of 2t+1 cert votes bundle from + COLUMN(extra_reward_votes); // extra reward votes on top of 2t+1 cert votes bundle from // latest_round_two_t_plus_one_votes COLUMN(pbft_block_period); COLUMN(dag_block_period); COLUMN_W_COMP(proposal_period_levels_map, getIntComparator()); COLUMN(final_chain_meta); - COLUMN(final_chain_transaction_location_by_hash); - COLUMN(final_chain_replay_protection); COLUMN(final_chain_transaction_hashes_by_blk_number); - COLUMN(final_chain_transaction_count_by_blk_number); COLUMN(final_chain_blk_by_number); COLUMN(final_chain_blk_hash_by_number); COLUMN(final_chain_blk_number_by_hash); @@ -122,6 +121,8 @@ class DbStorage : public std::enable_shared_from_this { COLUMN(final_chain_log_blooms_index); COLUMN_W_COMP(sortition_params_change, getIntComparator()); + COLUMN_W_COMP(block_rewards_stats, getIntComparator()); + #undef COLUMN #undef COLUMN_W_COMP }; @@ -144,6 +145,8 @@ class DbStorage : public std::enable_shared_from_this { const uint32_t kDbSnapshotsMaxCount = 0; std::set snapshots_; + uint32_t kMajorVersion_; + bool major_version_changed_ = false; bool minor_version_changed_ = false; auto handle(Column const& col) const { return handles_[col.ordinal_]; } @@ -153,7 +156,7 @@ class DbStorage : public std::enable_shared_from_this { public: explicit DbStorage(fs::path const& base_path, uint32_t db_snapshot_each_n_pbft_block = 0, uint32_t max_open_files = 0, uint32_t db_max_snapshots = 0, PbftPeriod db_revert_to_period = 0, addr_t node_addr = addr_t(), - bool rebuild = false, bool rebuild_columns = false); + bool rebuild = false); ~DbStorage(); DbStorage(const DbStorage&) = delete; @@ -175,6 +178,15 @@ class DbStorage : public std::enable_shared_from_this { void loadSnapshots(); void disableSnapshots(); void enableSnapshots(); + void updateDbVersions(); + void deleteColumnData(const Column& c); + + // For removal of LOG.old.* files in the database + void removeOldLogFiles() const; + void removeFilesWithPattern(const std::string& directory, const std::regex& pattern) const; + + uint32_t getMajorVersion() const; + std::unique_ptr getColumnIterator(const Column& c); // Genesis void setGenesisHash(const h256& genesis_hash); @@ -231,6 +243,7 @@ class DbStorage : public std::enable_shared_from_this { void addTransactionPeriodToBatch(Batch& write_batch, trx_hash_t const& trx, PbftPeriod period, uint32_t position); std::optional> getTransactionPeriod(trx_hash_t const& hash) const; std::unordered_map getAllTransactionPeriod(); + uint64_t getTransactionCount(PbftPeriod period) const; // PBFT manager uint32_t getPbftMgrField(PbftMgrField field); @@ -267,15 +280,17 @@ class DbStorage : public std::enable_shared_from_this { // Own votes for the latest round void saveOwnVerifiedVote(const std::shared_ptr& vote); std::vector> getOwnVerifiedVotes(); - void clearOwnVerifiedVotes(Batch& write_batch); + void clearOwnVerifiedVotes(Batch& write_batch, const std::vector>& own_verified_votes); // 2t+1 votes bundles for the latest round void replaceTwoTPlusOneVotes(TwoTPlusOneVotedBlockType type, const std::vector>& votes); + void replaceTwoTPlusOneVotesToBatch(TwoTPlusOneVotedBlockType type, const std::vector>& votes, + Batch& write_batch); std::vector> getAllTwoTPlusOneVotes(); // Reward votes - cert votes for the latest finalized block - void replaceRewardVotes(const std::vector>& votes, Batch& write_batch); - void 
saveRewardVote(const std::shared_ptr& vote); + void removeExtraRewardVotes(const std::vector& votes, Batch& write_batch); + void saveExtraRewardVote(const std::shared_ptr& vote); std::vector> getRewardVotes(); // period_pbft_block @@ -301,6 +316,7 @@ class DbStorage : public std::enable_shared_from_this { void addProposalPeriodDagLevelsMapToBatch(uint64_t level, PbftPeriod period, Batch& write_batch); bool hasMinorVersionChanged() { return minor_version_changed_; } + bool hasMajorVersionChanged() { return major_version_changed_; } void compactColumn(Column const& column) { db_->CompactRange({}, handle(column), nullptr, nullptr); } diff --git a/libraries/core_libs/storage/src/migration/migration_manager.cpp b/libraries/core_libs/storage/src/migration/migration_manager.cpp new file mode 100644 index 0000000000..7f5dabd999 --- /dev/null +++ b/libraries/core_libs/storage/src/migration/migration_manager.cpp @@ -0,0 +1,20 @@ +#include "storage/migration/migration_manager.hpp" + +#include "storage/migration/transaction_hashes.hpp" + +namespace taraxa::storage::migration { +Manager::Manager(std::shared_ptr db, const addr_t& node_addr) : db_(db) { + LOG_OBJECTS_CREATE("MIGRATIONS"); + registerMigration(); +} + +void Manager::applyAll() { + for (const auto& m : migrations_) { + if (!m->isApplied()) { + LOG(log_si_) << "Applying migration " << m->id(); + m->apply(log_si_); + LOG(log_si_) << "Migration applied " << m->id(); + } + } +} +} // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/src/migration/transaction_hashes.cpp b/libraries/core_libs/storage/src/migration/transaction_hashes.cpp new file mode 100644 index 0000000000..63f5ab0b0e --- /dev/null +++ b/libraries/core_libs/storage/src/migration/transaction_hashes.cpp @@ -0,0 +1,37 @@ +#include "storage/migration/transaction_hashes.hpp" + +namespace taraxa::storage::migration { +struct OldTransactionsHashes { + std::string serialized_; + size_t count_; + + explicit OldTransactionsHashes(std::string serialized) + : serialized_(std::move(serialized)), count_(serialized_.size() / dev::h256::size) {} + dev::h256 get(size_t i) const { + return dev::h256(reinterpret_cast(serialized_.data() + i * dev::h256::size), + dev::h256::ConstructFromPointer); + } + size_t count() const { return count_; } +}; + +TransactionHashes::TransactionHashes(std::shared_ptr db) : migration::Base(db) {} + +std::string TransactionHashes::id() { return "TransactionHashes"; } + +uint32_t TransactionHashes::dbVersion() { return 1; } + +void TransactionHashes::migrate() { + auto it = db_->getColumnIterator(DB::Columns::final_chain_transaction_hashes_by_blk_number); + + // Get and save data in new format for all blocks + for (it->SeekToFirst(); it->Valid(); it->Next()) { + ::taraxa::TransactionHashes new_data; + auto old_data = std::make_unique(it->value().ToString()); + new_data.reserve(old_data->count()); + for (size_t i = 0; i < new_data.capacity(); ++i) { + new_data.emplace_back(old_data->get(i)); + } + db_->insert(batch_, DB::Columns::final_chain_transaction_hashes_by_blk_number, it->key(), dev::rlp(new_data)); + } +} +} // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index fd4d10ee94..6f3beff3bd 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -2,7 +2,9 @@ #include #include +#include #include +#include #include "config/version.hpp" 
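// Usage sketch for the migration framework introduced above. The Base/Manager contract
// comes from this diff, but ExampleMigration itself is illustrative only: a migration
// subclasses Base, stages all writes on `batch_`, and is registered once in Manager's
// constructor; Base::apply() then skips it on a major-version mismatch and records its
// id() in DB::Columns::migrations so it runs at most once.
//
//   namespace taraxa::storage::migration {
//   class ExampleMigration : public Base {
//    public:
//     explicit ExampleMigration(std::shared_ptr<DB> db) : Base(std::move(db)) {}
//     std::string id() override { return "ExampleMigration"; }
//     uint32_t dbVersion() override { return 1; }  // the major db version it targets
//     void migrate() override {
//       // stage all changes on batch_ (e.g. re-encode rows of a column);
//       // Base::apply() commits the batch after setApplied()
//     }
//   };
//   }  // namespace taraxa::storage::migration
//
//   // and in Manager's constructor: registerMigration<ExampleMigration>();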
#include "dag/sortition_params_manager.hpp" @@ -19,8 +21,7 @@ static constexpr uint16_t DAG_BLOCKS_POS_IN_PERIOD_DATA = 2; static constexpr uint16_t TRANSACTIONS_POS_IN_PERIOD_DATA = 3; DbStorage::DbStorage(fs::path const& path, uint32_t db_snapshot_each_n_pbft_block, uint32_t max_open_files, - uint32_t db_max_snapshots, PbftPeriod db_revert_to_period, addr_t node_addr, bool rebuild, - bool rebuild_columns) + uint32_t db_max_snapshots, PbftPeriod db_revert_to_period, addr_t node_addr, bool rebuild) : path_(path), handles_(Columns::all.size()), kDbSnapshotsEachNblock(db_snapshot_each_n_pbft_block), @@ -42,6 +43,8 @@ DbStorage::DbStorage(fs::path const& path, uint32_t db_snapshot_each_n_pbft_bloc } fs::create_directories(db_path_); + removeOldLogFiles(); + rocksdb::Options options; options.create_missing_column_families = true; options.create_if_missing = true; @@ -60,9 +63,7 @@ DbStorage::DbStorage(fs::path const& path, uint32_t db_snapshot_each_n_pbft_bloc }); LOG_OBJECTS_CREATE("DBS"); - if (rebuild_columns) { - rebuildColumns(options); - } + rebuildColumns(options); // Iterate over the db folders and populate snapshot set loadSnapshots(); @@ -79,33 +80,69 @@ DbStorage::DbStorage(fs::path const& path, uint32_t db_snapshot_each_n_pbft_bloc dag_blocks_count_.store(getStatusField(StatusDbField::DagBlkCount)); dag_edge_count_.store(getStatusField(StatusDbField::DagEdgeCount)); - uint32_t major_version = getStatusField(StatusDbField::DbMajorVersion); + kMajorVersion_ = getStatusField(StatusDbField::DbMajorVersion); uint32_t minor_version = getStatusField(StatusDbField::DbMinorVersion); - if (major_version == 0 && minor_version == 0) { - saveStatusField(StatusDbField::DbMajorVersion, TARAXA_DB_MAJOR_VERSION); - saveStatusField(StatusDbField::DbMinorVersion, TARAXA_DB_MINOR_VERSION); - } else { - if (major_version != TARAXA_DB_MAJOR_VERSION) { - throw DbException(string("Database version mismatch. 
Version on disk ") + - getFormattedVersion({major_version, minor_version}) + - " Node version:" + getFormattedVersion({TARAXA_DB_MAJOR_VERSION, TARAXA_DB_MINOR_VERSION})); - } else if (minor_version != TARAXA_DB_MINOR_VERSION) { - minor_version_changed_ = true; + if (kMajorVersion_ != 0 && kMajorVersion_ != TARAXA_DB_MAJOR_VERSION) { + major_version_changed_ = true; + } else if (minor_version != TARAXA_DB_MINOR_VERSION) { + minor_version_changed_ = true; + } +} + +void DbStorage::removeOldLogFiles() const { + const std::regex filePattern("LOG\\.old\\.\\d+"); + removeFilesWithPattern(db_path_, filePattern); + removeFilesWithPattern(state_db_path_, filePattern); +} + +void DbStorage::removeFilesWithPattern(const std::string& directory, const std::regex& pattern) const { + try { + for (const auto& entry : std::filesystem::directory_iterator(directory)) { + const std::string& filename = entry.path().filename().string(); + if (std::regex_match(filename, pattern)) { + std::filesystem::remove(entry.path()); + LOG(log_dg_) << "Removed file: " << filename << std::endl; + } } + } catch (const std::filesystem::filesystem_error& e) { + LOG(log_dg_) << "Error accessing directory: " << e.what() << std::endl; } } +void DbStorage::updateDbVersions() { + saveStatusField(StatusDbField::DbMajorVersion, TARAXA_DB_MAJOR_VERSION); + saveStatusField(StatusDbField::DbMinorVersion, TARAXA_DB_MINOR_VERSION); +} + +void DbStorage::deleteColumnData(const Column& c) { + checkStatus(db_->DropColumnFamily(handle(c))); + + auto options = rocksdb::ColumnFamilyOptions(); + if (c.comparator_) { + options.comparator = c.comparator_; + } + checkStatus(db_->CreateColumnFamily(options, c.name(), &handles_[c.ordinal_])); +} + void DbStorage::rebuildColumns(const rocksdb::Options& options) { std::unique_ptr db; std::vector column_families; rocksdb::DB::ListColumnFamilies(options, db_path_.string(), &column_families); + if (column_families.empty()) { + LOG(log_wr_) << "DB isn't initialized in rebuildColumns. Skip it"; + return; + } std::vector descriptors; descriptors.reserve(column_families.size()); std::vector handles; handles.reserve(column_families.size()); std::transform(column_families.begin(), column_families.end(), std::back_inserter(descriptors), [](const auto& name) { - return rocksdb::ColumnFamilyDescriptor(name, rocksdb::ColumnFamilyOptions()); + const auto it = std::find_if(Columns::all.begin(), Columns::all.end(), + [&name](const Column& col) { return col.name() == name; }); + auto options = rocksdb::ColumnFamilyOptions(); + if (it != Columns::all.end() && it->comparator_) options.comparator = it->comparator_; + return rocksdb::ColumnFamilyDescriptor(name, options); }); rocksdb::DB* db_ptr = nullptr; checkStatus(rocksdb::DB::Open(options, db_path_.string(), descriptors, &handles, &db_ptr)); @@ -257,6 +294,12 @@ DbStorage::~DbStorage() { checkStatus(db_->Close()); } +uint32_t DbStorage::getMajorVersion() const { return kMajorVersion_; } + +std::unique_ptr DbStorage::getColumnIterator(const Column& c) { + return std::unique_ptr(db_->NewIterator(read_options_, handle(c))); +} + void DbStorage::checkStatus(rocksdb::Status const& status) { if (status.ok()) return; throw DbException(string("Db error. 
Status code: ") + std::to_string(status.code()) + @@ -444,14 +487,14 @@ void DbStorage::clearPeriodDataHistory(PbftPeriod end_period) { auto start_slice = toSlice(start_period); auto end_slice = toSlice(end_period); for (auto period = start_period; period < end_period; period++) { - // Find transactions included in the old blocks and delete data related to these transactions to free disk space + // Find transactions included in the old blocks and delete data related to these transactions to free disk + // space auto trx_hashes_raw = lookup(period, DB::Columns::final_chain_transaction_hashes_by_blk_number); auto hashes_count = trx_hashes_raw.size() / trx_hash_t::size; for (uint32_t i = 0; i < hashes_count; i++) { - auto hash = - trx_hash_t((uint8_t*)(trx_hashes_raw.data() + i * trx_hash_t::size), trx_hash_t::ConstructFromPointer); + auto hash = trx_hash_t(reinterpret_cast(trx_hashes_raw.data() + i * trx_hash_t::size), + trx_hash_t::ConstructFromPointer); remove(write_batch, Columns::final_chain_receipt_by_trx_hash, hash); - remove(write_batch, Columns::final_chain_transaction_location_by_hash, hash); } remove(write_batch, Columns::final_chain_transaction_hashes_by_blk_number, EthBlockNumber(period)); if ((period - start_period + 1) % max_batch_delete == 0) { @@ -462,11 +505,10 @@ void DbStorage::clearPeriodDataHistory(PbftPeriod end_period) { commitWriteBatch(write_batch); db_->DeleteRange(write_options_, handle(Columns::period_data), start_slice, end_slice); - // Deletion alone does not guarantee that the disk space is freed, these CompactRange methods actually compact the - // data in the database and free disk space + // Deletion alone does not guarantee that the disk space is freed, these CompactRange methods actually compact + // the data in the database and free disk space db_->CompactRange({}, handle(Columns::period_data), &start_slice, &end_slice); db_->CompactRange({}, handle(Columns::final_chain_receipt_by_trx_hash), nullptr, nullptr); - db_->CompactRange({}, handle(Columns::final_chain_transaction_location_by_hash), nullptr, nullptr); db_->CompactRange({}, handle(Columns::final_chain_transaction_hashes_by_blk_number), nullptr, nullptr); } } @@ -608,6 +650,15 @@ std::shared_ptr DbStorage::getTransaction(trx_hash_t const& hash) { return nullptr; } +uint64_t DbStorage::getTransactionCount(PbftPeriod period) const { + auto period_data = getPeriodDataRaw(period); + if (period_data.size()) { + auto period_data_rlp = dev::RLP(period_data); + return period_data_rlp[TRANSACTIONS_POS_IN_PERIOD_DATA].itemCount(); + } + return 0; +} + std::pair, trx_hash_t> DbStorage::getFinalizedTransactions( std::vector const& trx_hashes) const { // Map of period to position of transactions within a period @@ -817,13 +868,10 @@ std::vector> DbStorage::getOwnVerifiedVotes() { return votes; } -void DbStorage::clearOwnVerifiedVotes(Batch& write_batch) { - // TODO: deletion could be optimized if we save votes in memory - auto it = - std::unique_ptr(db_->NewIterator(read_options_, handle(Columns::latest_round_own_votes))); - for (it->SeekToFirst(); it->Valid(); it->Next()) { - const auto vote = std::make_shared(asBytes(it->value().ToString())); - remove(write_batch, Columns::latest_round_own_votes, vote->getHash().asBytes()); +void DbStorage::clearOwnVerifiedVotes(Batch& write_batch, + const std::vector>& own_verified_votes) { + for (const auto& own_vote : own_verified_votes) { + remove(write_batch, Columns::latest_round_own_votes, own_vote->getHash().asBytes()); } } @@ -838,6 +886,17 @@ void 
DbStorage::replaceTwoTPlusOneVotes(TwoTPlusOneVotedBlockType type, insert(Columns::latest_round_two_t_plus_one_votes, static_cast(type), s.out()); } +void DbStorage::replaceTwoTPlusOneVotesToBatch(TwoTPlusOneVotedBlockType type, + const std::vector>& votes, Batch& write_batch) { + remove(write_batch, Columns::latest_round_two_t_plus_one_votes, static_cast(type)); + + dev::RLPStream s(votes.size()); + for (const auto& vote : votes) { + s.appendRaw(vote->rlp(true, true)); + } + insert(write_batch, Columns::latest_round_two_t_plus_one_votes, static_cast(type), s.out()); +} + std::vector> DbStorage::getAllTwoTPlusOneVotes() { std::vector> votes; auto load_db_votes = [this, &votes](TwoTPlusOneVotedBlockType type) { @@ -858,29 +917,20 @@ std::vector> DbStorage::getAllTwoTPlusOneVotes() { return votes; } -void DbStorage::replaceRewardVotes(const std::vector>& votes, Batch& write_batch) { - // TODO: deletion could be optimized if we save votes in memory - // Remove existing reward votes - auto it = std::unique_ptr(db_->NewIterator(read_options_, handle(Columns::latest_reward_votes))); - for (it->SeekToFirst(); it->Valid(); it->Next()) { - const auto vote = std::make_shared(asBytes(it->value().ToString())); - remove(write_batch, Columns::latest_reward_votes, vote->getHash().asBytes()); - } - - // Add new reward votes - for (const auto& vote : votes) { - insert(write_batch, Columns::latest_reward_votes, vote->getHash().asBytes(), vote->rlp(true, true)); +void DbStorage::removeExtraRewardVotes(const std::vector& votes, Batch& write_batch) { + for (const auto& v : votes) { + remove(write_batch, Columns::extra_reward_votes, v.asBytes()); } } -void DbStorage::saveRewardVote(const std::shared_ptr& vote) { - insert(Columns::latest_reward_votes, vote->getHash().asBytes(), vote->rlp(true, true)); +void DbStorage::saveExtraRewardVote(const std::shared_ptr& vote) { + insert(Columns::extra_reward_votes, vote->getHash().asBytes(), vote->rlp(true, true)); } std::vector> DbStorage::getRewardVotes() { std::vector> votes; - auto it = std::unique_ptr(db_->NewIterator(read_options_, handle(Columns::latest_reward_votes))); + auto it = std::unique_ptr(db_->NewIterator(read_options_, handle(Columns::extra_reward_votes))); for (it->SeekToFirst(); it->Valid(); it->Next()) { votes.emplace_back(std::make_shared(asBytes(it->value().ToString()))); } diff --git a/libraries/types/pbft_block/include/pbft/pbft_block.hpp b/libraries/types/pbft_block/include/pbft/pbft_block.hpp index ccfcd6b8a3..9aa3a35cbf 100644 --- a/libraries/types/pbft_block/include/pbft/pbft_block.hpp +++ b/libraries/types/pbft_block/include/pbft/pbft_block.hpp @@ -7,7 +7,6 @@ #include "common/types.hpp" #include "dag/dag_block.hpp" -#include "transaction/transaction.hpp" #include "vote/vote.hpp" namespace taraxa { @@ -17,7 +16,7 @@ namespace taraxa { */ /** - * @brief The PbftBlockk class is a PBFT block class that includes PBFT block hash, previous PBFT block hash, DAG anchor + * @brief The PbftBlock class is a PBFT block class that includes PBFT block hash, previous PBFT block hash, DAG anchor * hash, DAG blocks ordering hash, period number, timestamp, proposer address, and proposer signature. 
*/ class PbftBlock { @@ -36,8 +35,8 @@ class PbftBlock { PbftBlock(const blk_hash_t& prev_blk_hash, const blk_hash_t& dag_blk_hash_as_pivot, const blk_hash_t& order_hash, const blk_hash_t& prev_state_root, PbftPeriod period, const addr_t& beneficiary, const secret_t& sk, std::vector&& reward_votes); - explicit PbftBlock(dev::RLP const& rlp); - explicit PbftBlock(bytes const& RLP); + explicit PbftBlock(const dev::RLP& rlp); + explicit PbftBlock(const bytes& RLP); /** * @brief Secure Hash Algorithm 3 @@ -78,33 +77,33 @@ class PbftBlock { * @param dag_blks DAG blocks hashes * @return PBFT block with DAG blocks in JSON */ - static Json::Value toJson(PbftBlock const& b, std::vector const& dag_blks); + static Json::Value toJson(const PbftBlock& b, const std::vector& dag_blks); /** * @brief Get PBFT block hash * @return PBFT block hash */ - auto const& getBlockHash() const { return block_hash_; } + const auto& getBlockHash() const { return block_hash_; } /** * @brief Get previous PBFT block hash * @return previous PBFT block hash */ - auto const& getPrevBlockHash() const { return prev_block_hash_; } + const auto& getPrevBlockHash() const { return prev_block_hash_; } /** * @brief Get DAG anchor hash for the finalized PBFT block * @return DAG anchor hash */ - auto const& getPivotDagBlockHash() const { return dag_block_hash_as_pivot_; } + const auto& getPivotDagBlockHash() const { return dag_block_hash_as_pivot_; } /** * @brief Get DAG blocks ordering hash * @return DAG blocks ordering hash */ - auto const& getOrderHash() const { return order_hash_; } + const auto& getOrderHash() const { return order_hash_; } - auto const& getPrevStateRoot() const { return prev_state_root_hash_; } + const auto& getPrevStateRoot() const { return prev_state_root_hash_; } /** * @brief Get period number @@ -122,7 +121,8 @@ class PbftBlock { * @brief Get PBFT block proposer address * @return PBFT block proposer address */ - auto const& getBeneficiary() const { return beneficiary_; } + const auto& getBeneficiary() const { return beneficiary_; } + const auto& getRewardVotes() const { return reward_votes_; } private: @@ -130,8 +130,14 @@ class PbftBlock { * @brief Set PBFT block hash and block proposer address */ void calculateHash_(); + + /** + * @brief Check if all rewards votes are unique + * + */ + void checkUniqueRewardVotes(); }; -std::ostream& operator<<(std::ostream& strm, PbftBlock const& pbft_blk); +std::ostream& operator<<(std::ostream& strm, const PbftBlock& pbft_blk); /** @}*/ diff --git a/libraries/types/pbft_block/src/pbft_block.cpp b/libraries/types/pbft_block/src/pbft_block.cpp index 94668acbee..317ef637c5 100644 --- a/libraries/types/pbft_block/src/pbft_block.cpp +++ b/libraries/types/pbft_block/src/pbft_block.cpp @@ -7,12 +7,14 @@ #include "common/jsoncpp.hpp" namespace taraxa { + PbftBlock::PbftBlock(bytes const& b) : PbftBlock(dev::RLP(b)) {} PbftBlock::PbftBlock(dev::RLP const& rlp) { util::rlp_tuple(util::RLPDecoderRef(rlp, true), prev_block_hash_, dag_block_hash_as_pivot_, order_hash_, prev_state_root_hash_, period_, timestamp_, reward_votes_, signature_); calculateHash_(); + checkUniqueRewardVotes(); } PbftBlock::PbftBlock(const blk_hash_t& prev_blk_hash, const blk_hash_t& dag_blk_hash_as_pivot, @@ -28,6 +30,7 @@ PbftBlock::PbftBlock(const blk_hash_t& prev_blk_hash, const blk_hash_t& dag_blk_ timestamp_ = dev::utcTime(); signature_ = dev::sign(sk, sha3(false)); calculateHash_(); + checkUniqueRewardVotes(); } Json::Value PbftBlock::toJson(PbftBlock const& b, std::vector const& dag_blks) { @@ -53,6 
+56,17 @@ void PbftBlock::calculateHash_() { beneficiary_ = dev::right160(dev::sha3(dev::bytesConstRef(p.data(), sizeof(p)))); } +void PbftBlock::checkUniqueRewardVotes() { + std::unordered_set set; + set.reserve(reward_votes_.size()); + for (const auto& hash : reward_votes_) { + if (!set.insert(hash).second) { + throw std::runtime_error( + fmt("Invalid PBFT Block %s proposed by %s has duplicated vote %s", block_hash_, beneficiary_, hash)); + } + } +} + blk_hash_t PbftBlock::sha3(bool include_sig) const { return dev::sha3(rlp(include_sig)); } std::string PbftBlock::getJsonStr() const { return getJson().toStyledString(); } diff --git a/libraries/types/transaction/include/transaction/transaction.hpp b/libraries/types/transaction/include/transaction/transaction.hpp index 04e6f6c014..e05cfa3f08 100644 --- a/libraries/types/transaction/include/transaction/transaction.hpp +++ b/libraries/types/transaction/include/transaction/transaction.hpp @@ -10,8 +10,16 @@ namespace taraxa { struct Transaction { - struct InvalidSignature : std::runtime_error { - explicit InvalidSignature(std::string const &msg) : runtime_error("invalid signature:\n" + msg) {} + struct InvalidTransaction : std::runtime_error { + explicit InvalidTransaction(const std::string &msg) : runtime_error("invalid transaction - " + msg) {} + }; + + struct InvalidSignature : InvalidTransaction { + explicit InvalidSignature(const std::string &msg) : InvalidTransaction("signature:\n" + msg) {} + }; + + struct InvalidFormat : InvalidTransaction { + explicit InvalidFormat(const std::string &msg) : InvalidTransaction("rlp format:\n" + msg) {} }; private: @@ -70,7 +78,10 @@ struct Transaction { }; using SharedTransaction = std::shared_ptr; -using Transactions = ::std::vector; -using SharedTransactions = ::std::vector; +using Transactions = std::vector; +using SharedTransactions = std::vector; +using TransactionHashes = std::vector; + +TransactionHashes hashes_from_transactions(const SharedTransactions &transactions); } // namespace taraxa diff --git a/libraries/types/transaction/src/transaction.cpp b/libraries/types/transaction/src/transaction.cpp index b6f7bc797b..79f25ba161 100644 --- a/libraries/types/transaction/src/transaction.cpp +++ b/libraries/types/transaction/src/transaction.cpp @@ -3,6 +3,7 @@ #include +#include #include #include @@ -14,11 +15,19 @@ using namespace dev; uint64_t toChainID(u256 const &val) { if (val == 0 || std::numeric_limits::max() < val) { - BOOST_THROW_EXCEPTION(Transaction::InvalidSignature("eip-155 chain id must be in the open interval: (0, 2^64)")); + BOOST_THROW_EXCEPTION(Transaction::InvalidTransaction("eip-155 chain id must be in the open interval: (0, 2^64)")); } return static_cast(val); } +TransactionHashes hashes_from_transactions(const SharedTransactions &transactions) { + TransactionHashes trx_hashes; + trx_hashes.reserve(transactions.size()); + std::transform(transactions.cbegin(), transactions.cend(), std::back_inserter(trx_hashes), + [](const auto &trx) { return trx->getHash(); }); + return trx_hashes; +} + Transaction::Transaction(const trx_nonce_t &nonce, const val_t &value, const val_t &gas_price, gas_t gas, bytes data, const secret_t &sk, const optional &receiver, uint64_t chain_id) : nonce_(nonce), @@ -71,7 +80,7 @@ void Transaction::fromRLP(const dev::RLP &_rlp, bool verify_strict, const h256 & if (36 < v) { chain_id_ = toChainID((v - 35) / 2); } else if (v != 27 && v != 28) { - BOOST_THROW_EXCEPTION(InvalidSignature( + BOOST_THROW_EXCEPTION(InvalidFormat( "only values 27 and 28 are allowed 
for non-replay protected transactions for the 'v' signature field")); } vrs_.v = chain_id_ ? byte{v - (u256{chain_id_} * 2 + 35)} : byte{v - 27}; diff --git a/submodules/CMakeLists.txt b/submodules/CMakeLists.txt index a538dde2e6..3c5becf960 100644 --- a/submodules/CMakeLists.txt +++ b/submodules/CMakeLists.txt @@ -27,8 +27,6 @@ add_library( ) target_include_directories(ethash PUBLIC ${include_dir}) -include(ExternalProject) - # prefix of build dir set(BUILD_DIR_PREFIX "${CMAKE_BINARY_DIR}/deps") ## add not cmake target diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 18af5586e6..ffd207a4cb 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 18af5586e6852c419ab05d2a4107ff98968f1bdf +Subproject commit ffd207a4cb80919c1cdbf26f919b3a158ce497f3 diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 93c965100c..f9613497b3 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -37,13 +37,6 @@ add_executable(full_node_test full_node_test.cpp) target_link_libraries(full_node_test test_util) add_test(full_node_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/full_node_test) -# add_executable(hardfork_test hardfork_test.cpp) -# target_link_libraries(hardfork_test -# core_libs -# CONAN_PKG::gtest -# ) -# add_test(hardfork_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/hardfork_test) - add_executable(network_test network_test.cpp) target_link_libraries(network_test test_util) add_test(network_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/network_test) @@ -76,6 +69,9 @@ add_executable(vote_test vote_test.cpp) target_link_libraries(vote_test test_util) add_test(vote_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/vote_test) +add_executable(rewards_stats_test rewards_stats_test.cpp) +target_link_libraries(rewards_stats_test test_util) +add_test(rewards_stats_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/rewards_stats_test) add_executable(tarcap_threadpool_test tarcap_threadpool_test.cpp) target_link_libraries(tarcap_threadpool_test test_util) diff --git a/tests/final_chain_test.cpp b/tests/final_chain_test.cpp index 9b78ed321b..781223a14b 100644 --- a/tests/final_chain_test.cpp +++ b/tests/final_chain_test.cpp @@ -7,6 +7,8 @@ #include "common/vrf_wrapper.hpp" #include "config/config.hpp" #include "final_chain/trie_common.hpp" +#include "libdevcore/CommonJS.h" +#include "network/rpc/eth/Eth.h" #include "test_util/gtest.hpp" #include "test_util/samples.hpp" #include "test_util/test_util.hpp" @@ -42,20 +44,20 @@ struct FinalChainTest : WithDataDir { } auto advance(const SharedTransactions& trxs, advance_check_opts opts = {}) { - SUT = nullptr; - SUT = NewFinalChain(db, cfg); std::vector trx_hashes; - int pos = 0; + ++expected_blk_num; for (const auto& trx : trxs) { - db->saveTransactionPeriod(trx->getHash(), 1, pos++); trx_hashes.emplace_back(trx->getHash()); } - DagBlock dag_blk({}, {}, {}, trx_hashes, {}, {}, secret_t::random()); + + auto proposer_keys = dev::KeyPair::create(); + DagBlock dag_blk({}, {}, {}, trx_hashes, {}, {}, proposer_keys.secret()); db->saveDagBlock(dag_blk); std::vector reward_votes_hashes; auto pbft_block = - std::make_shared(kNullBlockHash, kNullBlockHash, kNullBlockHash, kNullBlockHash, 1, addr_t::random(), - dev::KeyPair::create().secret(), std::move(reward_votes_hashes)); + std::make_shared(kNullBlockHash, kNullBlockHash, kNullBlockHash, kNullBlockHash, expected_blk_num, + addr_t::random(), proposer_keys.secret(), std::move(reward_votes_hashes)); + std::vector> votes; PeriodData period_data(pbft_block, votes); 
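// advance() now finalizes against a PeriodData built in-memory for the real expected_blk_num,
// with the DAG block signed by a fresh proposer key, so the fee/reward tests below can check
// the block author's balance directly.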
period_data.dag_blocks.push_back(dag_blk); @@ -63,11 +65,9 @@ struct FinalChainTest : WithDataDir { auto batch = db->createWriteBatch(); db->savePeriodData(period_data, batch); - db->commitWriteBatch(batch); auto result = SUT->finalize(std::move(period_data), {dag_blk.getHash()}).get(); - ++expected_blk_num; const auto& blk_h = *result->final_chain_blk; EXPECT_EQ(util::rlp_enc(blk_h), util::rlp_enc(*SUT->block_header(blk_h.number))); EXPECT_EQ(util::rlp_enc(blk_h), util::rlp_enc(*SUT->block_header())); @@ -181,12 +181,6 @@ TEST_F(FinalChainTest, initial_balances) { init(); } -// TEST_F(FinalChainTest, update_state_config) { -// init(); -// cfg.genesis.state.hardforks.fix_genesis_fork_block = 2222222; -// SUT->update_state_config(cfg.genesis.state); -// } - TEST_F(FinalChainTest, contract) { auto sender_keys = dev::KeyPair::create(); const auto& addr = sender_keys.address(); @@ -455,6 +449,9 @@ TEST_F(FinalChainTest, failed_transaction_fee) { auto trx2_1 = std::make_shared(2, 101, 1, gas, dev::bytes(), sk, receiver); advance({trx1}); + auto blk = SUT->block_header(expected_blk_num); + auto proposer_balance = SUT->getBalance(blk->author); + EXPECT_EQ(proposer_balance.first, 21000); advance({trx2}); advance({trx3}); @@ -481,6 +478,154 @@ TEST_F(FinalChainTest, failed_transaction_fee) { } } +TEST_F(FinalChainTest, revert_reason) { + // contract TestRevert { + // function test(bool arg) public pure { + // require(arg, "arg required"); + // } + // } + const auto test_contract_code = + "608060405234801561001057600080fd5b506101ac806100206000396000f3fe608060405234801561001057600080fd5b50600436106100" + "2b5760003560e01c806336091dff14610030575b600080fd5b61004a600480360381019061004591906100cc565b61004c565b005b806100" + "8c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161008390610156565b60405180" + "910390fd5b50565b600080fd5b60008115159050919050565b6100a981610094565b81146100b457600080fd5b50565b6000813590506100" + "c6816100a0565b92915050565b6000602082840312156100e2576100e161008f565b5b60006100f0848285016100b7565b91505092915050" + "565b600082825260208201905092915050565b7f617267207265717569726564000000000000000000000000000000000000000060008201" + "5250565b6000610140600c836100f9565b915061014b8261010a565b602082019050919050565b6000602082019050818103600083015261" + "016f81610133565b905091905056fea2646970667358221220846c5a92aab30dade0d92661a25b1fd6ba9a914fd114f2f264c2003b5abdda" + "db64736f6c63430008120033"; + auto sender_keys = dev::KeyPair::create(); + const auto& from = sender_keys.address(); + const auto& sk = sender_keys.secret(); + cfg.genesis.state.initial_balances = {}; + cfg.genesis.state.initial_balances[from] = u256("10000000000000000000000"); + init(); + + net::rpc::eth::EthParams eth_rpc_params; + eth_rpc_params.chain_id = cfg.genesis.chain_id; + eth_rpc_params.gas_limit = cfg.genesis.dag.gas_limit; + eth_rpc_params.final_chain = SUT; + auto eth_json_rpc = net::rpc::eth::NewEth(std::move(eth_rpc_params)); + + auto nonce = 0; + auto trx1 = std::make_shared(nonce++, 0, 0, TEST_TX_GAS_LIMIT, dev::fromHex(test_contract_code), sk); + auto result = advance({trx1}); + auto test_contract_addr = result->trx_receipts[0].new_contract_address; + EXPECT_EQ(test_contract_addr, dev::right160(dev::sha3(dev::rlpList(from, 0)))); + auto call_data = "0x36091dff0000000000000000000000000000000000000000000000000000000000000000"; + { + Json::Value est(Json::objectValue); + est["to"] = dev::toHex(*test_contract_addr); + est["from"] = dev::toHex(from); + est["data"] = call_data; + 
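// Both the gas estimation and the read-only call below are expected to surface the
// contract's revert string ("arg required"), and the mined transaction later in this
// block must fail with status 0 without burning its whole gas budget.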
EXPECT_THROW_WITH(dev::jsToInt(eth_json_rpc->eth_estimateGas(est)), std::exception, + "evm: execution reverted: arg required"); + EXPECT_THROW_WITH(eth_json_rpc->eth_call(est, "latest"), std::exception, "evm: execution reverted: arg required"); + + auto gas = 100000; + auto trx = std::make_shared(2, 0, 1, gas, dev::fromHex(call_data), sk, test_contract_addr); + auto result = advance({trx}, {0, 0, 1}); + auto receipt = result->trx_receipts.front(); + ASSERT_EQ(receipt.status_code, 0); // failed + ASSERT_GT(gas, receipt.gas_used); // we aren't spending all gas in such cases + } +} + +TEST_F(FinalChainTest, incorrect_estimation_regress) { + // contract Receiver { + // uint256 public receivedETH; + // receive() external payable { + // receivedETH += msg.value; + // } + // } + const auto receiver_contract_code = + "608060405234801561001057600080fd5b5061012d806100206000396000f3fe608060405260043610601f5760003560e01c8063820bec9d" + "14603f57603a565b36603a57346000808282546032919060a4565b925050819055005b600080fd5b348015604a57600080fd5b5060516065" + "565b604051605c919060de565b60405180910390f35b60005481565b6000819050919050565b7f4e487b7100000000000000000000000000" + "000000000000000000000000000000600052601160045260246000fd5b600060ad82606b565b915060b683606b565b925082820190508082" + "111560cb5760ca6075565b5b92915050565b60d881606b565b82525050565b600060208201905060f1600083018460d1565b9291505056fe" + "a264697066735822122099ea1faf8b41cec96834060f2daaea3ae5c03561e110bdcf5a74ce041ddb497164736f6c63430008120033"; + + // contract SendFunction { + // function send(address to) external payable { + // (bool success,) = to.call{value: msg.value}(""); + // if (!success) { + // revert("Failed to send ETH"); + // } + // } + // } + const auto sender_contract_code = + "608060405234801561001057600080fd5b50610278806100206000396000f3fe60806040526004361061001e5760003560e01c80633e58c5" + "8c14610023575b600080fd5b61003d60048036038101906100389190610152565b61003f565b005b60008173ffffffffffffffffffffffff" + "ffffffffffffffff1634604051610065906101b0565b60006040518083038185875af1925050503d80600081146100a2576040519150601f" + "19603f3d011682016040523d82523d6000602084013e6100a7565b606091505b50509050806100eb576040517f08c379a000000000000000" + "00000000000000000000000000000000000000000081526004016100e290610222565b60405180910390fd5b5050565b600080fd5b600073" + "ffffffffffffffffffffffffffffffffffffffff82169050919050565b600061011f826100f4565b9050919050565b61012f81610114565b" + "811461013a57600080fd5b50565b60008135905061014c81610126565b92915050565b600060208284031215610168576101676100ef565b" + "5b60006101768482850161013d565b91505092915050565b600081905092915050565b50565b600061019a60008361017f565b91506101a5" + "8261018a565b600082019050919050565b60006101bb8261018d565b9150819050919050565b600082825260208201905092915050565b7f" + "4661696c656420746f2073656e64204554480000000000000000000000000000600082015250565b600061020c6012836101c5565b915061" + "0217826101d6565b602082019050919050565b6000602082019050818103600083015261023b816101ff565b905091905056fea264697066" + "73582212205fd48a05d31cae1309b1a3bb8fe678c4bfee4cd28079acd90056ad228e18d82864736f6c63430008120033"; + + auto sender_keys = dev::KeyPair::create(); + const auto& from = sender_keys.address(); + const auto& sk = sender_keys.secret(); + cfg.genesis.state.initial_balances = {}; + cfg.genesis.state.initial_balances[from] = u256("10000000000000000000000"); + // disable balances check as we have internal transfer + assume_only_toplevel_transfers = false; + init(); + + net::rpc::eth::EthParams 
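+ // Same wiring as in revert_reason above: an in-process eth RPC facade over
+ // the test chain, so eth_estimateGas / eth_call can be exercised directly.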
eth_rpc_params; + eth_rpc_params.chain_id = cfg.genesis.chain_id; + eth_rpc_params.gas_limit = cfg.genesis.dag.gas_limit; + eth_rpc_params.final_chain = SUT; + auto eth_json_rpc = net::rpc::eth::NewEth(std::move(eth_rpc_params)); + + auto nonce = 0; + auto trx1 = std::make_shared(nonce++, 0, 0, TEST_TX_GAS_LIMIT, dev::fromHex(receiver_contract_code), sk); + auto trx2 = std::make_shared(nonce++, 0, 0, TEST_TX_GAS_LIMIT, dev::fromHex(sender_contract_code), sk); + auto result = advance({trx1, trx2}); + auto receiver_contract_addr = result->trx_receipts[0].new_contract_address; + auto sender_contract_addr = result->trx_receipts[1].new_contract_address; + EXPECT_EQ(receiver_contract_addr, dev::right160(dev::sha3(dev::rlpList(from, 0)))); + + const auto call_data = "0x3e58c58c000000000000000000000000" + receiver_contract_addr->toString(); + const auto value = 10000; + { + Json::Value est(Json::objectValue); + est["to"] = dev::toHex(*sender_contract_addr); + est["from"] = dev::toHex(from); + est["value"] = value; + est["data"] = call_data; + auto estimate = dev::jsToInt(eth_json_rpc->eth_estimateGas(est)); + est["gas"] = dev::toJS(estimate); + eth_json_rpc->eth_call(est, "latest"); + } +} + +TEST_F(FinalChainTest, fee_rewards_distribution) { + auto sender_keys = dev::KeyPair::create(); + auto gas = 30000; + + const auto& receiver = dev::KeyPair::create().address(); + const auto& addr = sender_keys.address(); + const auto& sk = sender_keys.secret(); + cfg.genesis.state.initial_balances = {}; + cfg.genesis.state.initial_balances[addr] = 100000; + init(); + const auto gas_price = 1; + auto trx1 = std::make_shared(1, 100, gas_price, gas, dev::bytes(), sk, receiver); + + auto res = advance({trx1}); + auto gas_used = res->trx_receipts.front().gas_used; + auto blk = SUT->block_header(expected_blk_num); + auto proposer_balance = SUT->getBalance(blk->author); + EXPECT_EQ(proposer_balance.first, gas_used * gas_price); +} + +// This test should be last as state_api isn't destructed correctly because of exception TEST_F(FinalChainTest, initial_validator_exceed_maximum_stake) { const dev::KeyPair key = dev::KeyPair::create(); const dev::KeyPair validator_key = dev::KeyPair::create(); diff --git a/tests/full_node_test.cpp b/tests/full_node_test.cpp index e889fd3da7..132bb33ae0 100644 --- a/tests/full_node_test.cpp +++ b/tests/full_node_test.cpp @@ -242,7 +242,7 @@ TEST_F(FullNodeTest, db_test) { } batch = db.createWriteBatch(); - db.clearOwnVerifiedVotes(batch); + db.clearOwnVerifiedVotes(batch, verified_votes); db.commitWriteBatch(batch); EXPECT_TRUE(db.getOwnVerifiedVotes().empty()); @@ -285,9 +285,7 @@ TEST_F(FullNodeTest, db_test) { } EXPECT_TRUE(db.getRewardVotes().empty()); - batch = db.createWriteBatch(); - db.replaceRewardVotes(verified_votes, batch); - db.commitWriteBatch(batch); + for (auto v : verified_votes) db.saveExtraRewardVote(v); const auto db_reward_votes = db.getRewardVotes(); EXPECT_EQ(db_reward_votes.size(), verified_votes_map.size()); @@ -297,7 +295,7 @@ TEST_F(FullNodeTest, db_test) { const auto new_reward_vote = genVote(PbftVoteTypes::cert_vote, 10, 10, 3); verified_votes_map[new_reward_vote->getHash()] = new_reward_vote; - db.saveRewardVote(new_reward_vote); + db.saveExtraRewardVote(new_reward_vote); const auto new_db_reward_votes = db.getRewardVotes(); EXPECT_EQ(new_db_reward_votes.size(), verified_votes_map.size()); @@ -306,7 +304,12 @@ TEST_F(FullNodeTest, db_test) { } batch = db.createWriteBatch(); - db.replaceRewardVotes({}, batch); + + std::vector verified_votes_hashes, 
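+ // replaceRewardVotes() is gone: reward votes are now saved one-by-one via
+ // saveExtraRewardVote() and removed by explicit hash lists through
+ // removeExtraRewardVotes(), which is what the rest of this block exercises.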
new_db_reward_votes_hashes; + for (const auto &v : verified_votes) verified_votes_hashes.emplace_back(v->getHash()); + for (const auto &v : new_db_reward_votes) new_db_reward_votes_hashes.emplace_back(v->getHash()); + db.removeExtraRewardVotes(verified_votes_hashes, batch); + db.removeExtraRewardVotes(new_db_reward_votes_hashes, batch); db.commitWriteBatch(batch); EXPECT_TRUE(db.getRewardVotes().empty()); @@ -682,31 +685,13 @@ TEST_F(FullNodeTest, sync_five_nodes) { // Prune state_db of one node auto prune_node = nodes[nodes.size() - 1]; - const uint32_t min_blocks_to_prune = 50; + const uint32_t min_blocks_to_prune = 30; // This ensures that we never prune blocks that are over proposal period - ASSERT_HAPPENS({20s, 100ms}, [&](auto &ctx) { - const auto max_level = prune_node->getDagManager()->getMaxLevel(); - const auto proposal_period = prune_node->getDB()->getProposalPeriodForDagLevel(max_level); - ASSERT_TRUE(proposal_period.has_value()); - context.dummy_transaction(); - WAIT_EXPECT_TRUE(ctx, ((*proposal_period) > min_blocks_to_prune)) + ASSERT_HAPPENS({40s, 100ms}, [&](auto &ctx) { + WAIT_EXPECT_TRUE(ctx, (prune_node->getPbftChain()->getPbftChainSize() > min_blocks_to_prune + kMaxLevelsPerPeriod)) }); prune_node->getFinalChain()->prune(min_blocks_to_prune); context.assert_balances_synced(); - - // transfer some coins to pruned node ... - context.coin_transfer(0, prune_node->getAddress(), init_bal, false); - context.wait_all_transactions_known(); - - std::cout << "Waiting until transaction is executed" << std::endl; - auto trx_cnt = context.getIssuedTrxCount(); - ASSERT_HAPPENS({20s, 500ms}, [&](auto &ctx) { - for (size_t i = 0; i < nodes.size(); ++i) - WAIT_EXPECT_EQ(ctx, nodes[i]->getDB()->getNumTransactionExecuted(), trx_cnt) - }); - - // Check balances after prune"; - context.assert_balances_synced(); } TEST_F(FullNodeTest, insert_anchor_and_compute_order) { @@ -1657,7 +1642,7 @@ TEST_F(FullNodeTest, graphql_test) { block = service::ScalarArgument::require("block", data); auto transactionAt = service::ScalarArgument::require("transactionAt", block); const auto hash2 = service::StringArgument::require("hash", transactionAt); - EXPECT_EQ(nodes[0]->getFinalChain()->transaction_hashes(2)->get(0).toString(), hash2); + EXPECT_EQ(nodes[0]->getFinalChain()->transaction_hashes(2)->at(0).toString(), hash2); } } // namespace taraxa::core_tests diff --git a/tests/hardfork_test.cpp b/tests/hardfork_test.cpp deleted file mode 100644 index e48784ed19..0000000000 --- a/tests/hardfork_test.cpp +++ /dev/null @@ -1,199 +0,0 @@ -#include - -#include -#include -#include -#include -#include - -#include "cli/config.hpp" -#include "cli/tools.hpp" -#include "dag/dag.hpp" -#include "logger/logger.hpp" -#include "node/node.hpp" -#include "string" -#include "test_util/samples.hpp" -#include "transaction/transaction_manager.hpp" - -namespace taraxa::core_tests { - -// We need separate fixture for this tests because hardfork is overwriting config file. 
But we can't change config -// stored in global variable because values will change for next test cases -struct HardforkTest : WithDataDir { - FullNodeConfig node_cfg; - - HardforkTest() { - // creating config this way to prevent config files overwriting - auto cfg_filename = std::string("conf_taraxa1.json"); - auto p = DIR_CONF / cfg_filename; - auto w = DIR_CONF / std::string("wallet1.json"); - Json::Value test_node_wallet_json; - std::ifstream(w.string(), std::ifstream::binary) >> test_node_wallet_json; - node_cfg = FullNodeConfig(p.string(), test_node_wallet_json, data_dir / cfg_filename); - - fs::remove_all(node_cfg.data_path); - fs::create_directories(node_cfg.data_path); - - auto data_path_cfg = node_cfg.data_path / fs::path(node_cfg.json_file_name).filename(); - fs::copy_file(node_cfg.json_file_name, data_path_cfg); - node_cfg.json_file_name = data_path_cfg; - - addr_t root_node_addr("de2b1203d72d3549ee2f733b00b2789414c7cea5"); - node_cfg.genesis.state.initial_balances[root_node_addr] = 9007199254740991; - auto &dpos = *node_cfg.genesis.state.dpos; - dpos.genesis_state[root_node_addr][root_node_addr] = dpos.eligibility_balance_threshold; - // speed up block production - { - node_cfg.genesis.sortition.vrf.threshold_upper = 0xffff; - node_cfg.genesis.sortition.vdf.difficulty_min = 0; - node_cfg.genesis.sortition.vdf.difficulty_max = 3; - node_cfg.genesis.sortition.vdf.difficulty_stale = 3; - node_cfg.genesis.sortition.vdf.lambda_bound = 100; - // PBFT config - node_cfg.genesis.pbft.lambda_ms /= 20; - node_cfg.network.transaction_interval_ms /= 20; - } - } - - ~HardforkTest() { fs::remove_all(node_cfg.data_path); } - - HardforkTest(const HardforkTest &) = delete; - HardforkTest(HardforkTest &&) = delete; - HardforkTest &operator=(const HardforkTest &) = delete; - HardforkTest &operator=(HardforkTest &&) = delete; -}; - -TEST_F(HardforkTest, hardfork_override) { - auto default_json = cli::tools::getConfig(cli::Config::DEFAULT_CHAIN_ID); - auto default_hardforks = default_json["genesis"]["hardforks"]; - Json::Value config = default_json; - auto &state_cfg = config["genesis"]; - state_cfg["hardforks"].removeMember("fix_genesis_fork_block"); - - EXPECT_TRUE(state_cfg["hardforks"]["fix_genesis_fork_block"].isNull()); - cli::Config::addNewHardforks(config, default_json); - EXPECT_EQ(state_cfg["hardforks"], default_hardforks); - - state_cfg.removeMember("hardforks"); - EXPECT_TRUE(state_cfg["hardforks"].isNull()); - - cli::Config::addNewHardforks(config, default_json); - EXPECT_EQ(state_cfg["hardforks"], default_hardforks); -} - -TEST_F(HardforkTest, fix_genesis_fork_block_is_zero) { - auto &cfg = node_cfg.genesis; - cfg.state.hardforks.fix_genesis_fork_block = 0; - auto node = launch_nodes({node_cfg}).front(); - - auto dummy_trx = std::make_shared(1, 0, 0, 0, bytes(), node->getSecretKey(), node->getAddress()); - // broadcast dummy transaction - node->getTransactionManager()->insertTransaction(dummy_trx); - wait({100s, 500ms}, [&](auto &ctx) { - if (node->getFinalChain()->last_block_number() <= cfg.state.hardforks.fix_genesis_fork_block) { - ctx.fail(); - } - }); - EXPECT_EQ(cfg.state.initial_balances.begin()->second, - node->getConfig().genesis.state.initial_balances.begin()->second); -} - -TEST_F(HardforkTest, hardfork) { - auto &cfg = node_cfg.genesis; - cfg.state.hardforks.fix_genesis_fork_block = 10; - cfg.state.dpos->eligibility_balance_threshold = 100000; - cfg.state.dpos->vote_eligibility_balance_step.assign(cfg.state.dpos->eligibility_balance_threshold); - 
cfg.state.dpos->delegation_delay = 5; - cfg.state.dpos->delegation_locking_period = 5; - - auto random_node = addr_t::random(); - auto random_votes = 3; - for (auto &gb : cfg.state.initial_balances) { - gb.second = 110000000; - } - for (auto &gs : cfg.state.dpos->genesis_state) { - for (auto &b : gs.second) { - b.second = 1100000; - std::cout << b.first << ": " << b.second << std::endl; - } - gs.second.emplace(random_node, random_votes * cfg.state.dpos->vote_eligibility_balance_step); - } - - auto node = launch_nodes({node_cfg}).front(); - auto nonce = 0; - auto dummy_trx = [&nonce, node]() { - auto dummy_trx = std::make_shared(nonce++, 0, 0, 0, bytes(), node->getSecretKey(), node->getAddress()); - // broadcast dummy transaction - node->getTransactionManager()->insertTransaction(dummy_trx); - }; - dummy_trx(); - node->getFinalChain()->block_finalized_.subscribe([&](const std::shared_ptr &res) { - const auto block_num = res->final_chain_blk->number; - if (cfg.state.hardforks.fix_genesis_fork_block == block_num) { - return; - } - dummy_trx(); - dummy_trx(); - }); - std::map balances_before; - for (const auto &b : node->getConfig().genesis.state.initial_balances) { - auto balance = node->getFinalChain()->get_account(b.first)->balance; - balances_before.emplace(b.first, balance); - } - auto votes_count = 11; - EXPECT_EQ(votes_count + random_votes, node->getFinalChain()->dpos_eligible_total_vote_count(0)); - EXPECT_EQ(random_votes, node->getFinalChain()->dpos_eligible_vote_count(0, random_node)); - - wait({100s, 500ms}, [&](auto &ctx) { - if (node->getFinalChain()->last_block_number() < cfg.state.hardforks.fix_genesis_fork_block) { - ctx.fail(); - } - }); - - u256 dpos_genesis_sum = 0; - // Verify DPOS initial balances increasing - for (const auto &gs : node->getConfig().genesis.state.dpos->genesis_state) { - for (const auto &b : gs.second) { - EXPECT_EQ(b.second, node->getFinalChain()->get_staking_balance(b.first)); - dpos_genesis_sum += b.second; - } - } - - for (const auto &b : node->getConfig().genesis.state.initial_balances) { - auto balance_after = node->getFinalChain()->get_account(b.first)->balance; - auto res = b.second - dpos_genesis_sum; - EXPECT_EQ(res, balance_after); - } - - auto block = node->getFinalChain()->last_block_number(); - EXPECT_EQ(votes_count, node->getFinalChain()->dpos_eligible_total_vote_count(block)); - EXPECT_EQ(0, node->getFinalChain()->dpos_eligible_vote_count(block, random_node)); - - // check for dpos_query method - { - const auto &genesis_sender = cfg.state.dpos->genesis_state.begin()->first; - - state_api::DPOSQuery::AccountQuery acc_q; - acc_q.with_staking_balance = true; - acc_q.with_outbound_deposits = true; - acc_q.with_inbound_deposits = true; - state_api::DPOSQuery q; - q.with_eligible_count = true; - q.account_queries[genesis_sender] = acc_q; - - // auto q_res = node->getFinalChain()->dpos_query(q); - auto res = q_res.account_results[genesis_sender]; - EXPECT_EQ(res.inbound_deposits.size(), 1); - EXPECT_EQ(res.inbound_deposits.begin()->first, genesis_sender); - EXPECT_EQ(res.inbound_deposits.begin()->second, res.staking_balance); - } - - EXPECT_EQ(cfg.state.dpos->vote_eligibility_balance_step * kOneTara, - node->getConfig().genesis.state.dpos->vote_eligibility_balance_step); - EXPECT_NE(cfg.state.initial_balances.begin()->second, - node->getConfig().genesis.state.initial_balances.begin()->second); - EXPECT_NE(cfg.state.dpos->eligibility_balance_threshold, - node->getConfig().genesis.state.dpos->eligibility_balance_threshold); -} - -} // namespace 
taraxa::core_tests diff --git a/tests/network_test.cpp b/tests/network_test.cpp index 2e6c744455..5993b155a7 100644 --- a/tests/network_test.cpp +++ b/tests/network_test.cpp @@ -52,7 +52,7 @@ TEST_F(NetworkTest, transfer_block) { SharedTransactions transactions({g_signed_trx_samples[0], g_signed_trx_samples[1]}); nw2->getSpecificHandler()->onNewTransactions(std::move(transactions)); - EXPECT_HAPPENS({10s, 200ms}, [&](auto& ctx) { + EXPECT_HAPPENS({60s, 100ms}, [&](auto& ctx) { nw1->setPendingPeersToReady(); nw2->setPendingPeersToReady(); WAIT_EXPECT_EQ(ctx, nw1->getPeerCount(), 1) @@ -73,8 +73,7 @@ TEST_F(NetworkTest, transfer_block) { ASSERT_EQ(1, num_received); } -// Test creates two Network setup and verifies sending blocks between is successfull -// This test can not work anymore as we are marking other nodes as malicous becasue of invalid dag blocks +// Test creates two Network setups and verifies that sending blocks between them is successful TEST_F(NetworkTest, transfer_lot_of_blocks) { auto node_cfgs = make_node_cfgs(2, 1, 20); auto nodes = launch_nodes(node_cfgs); @@ -91,9 +90,8 @@ TEST_F(NetworkTest, transfer_lot_of_blocks) { const auto nw1 = node1->getNetwork(); const auto nw2 = node2->getNetwork(); - const auto trxs = samples::createSignedTrxSamples(0, 1500, g_secret); + auto trxs = samples::createSignedTrxSamples(0, 1500, g_secret); const auto estimation = node1->getTransactionManager()->estimateTransactionGas(trxs[0], {}); - const std::vector estimations(trxs.size(), estimation); // node1 add one valid block const auto proposal_level = 1; @@ -106,48 +104,41 @@ TEST_F(NetworkTest, transfer_lot_of_blocks) { dev::bytes vdf_msg = DagManager::getVdfMessage(dag_genesis, {trxs[0]}); vdf.computeVdfSolution(sortition_params, vdf_msg, false); DagBlock blk(dag_genesis, proposal_level, {}, {trxs[0]->getHash()}, estimation, vdf, node1->getSecretKey()); - auto block_hash = blk.getHash(); + const auto block_hash = blk.getHash(); + dag_mgr1->addDagBlock(std::move(blk), {trxs[0]}); std::vector> dag_blocks; - dag_blocks.emplace_back(std::make_shared(std::move(blk))); - // creating lot of blocks just for size - std::vector trx_hashes; - std::vector> verified_transactions; - trx_hashes.reserve(trxs.size()); - verified_transactions.reserve(trxs.size()); - - for (const auto& trx : trxs) { - trx_hashes.push_back(trx->getHash()); - verified_transactions.push_back(trx); - } - - for (int i = 0; i < 100; ++i) { + { const auto proposal_period = *db1->getProposalPeriodForDagLevel(proposal_level + 1); const auto period_block_hash = db1->getPeriodBlockHash(proposal_period); const auto sortition_params = dag_mgr1->sortitionParamsManager().getSortitionParams(proposal_period); - vdf_sortition::VdfSortition vdf(sortition_params, node1->getVrfSecretKey(), - VrfSortitionBase::makeVrfInput(proposal_level + 1, period_block_hash), 1, 1); - DagBlock blk(block_hash, proposal_level + 1, {}, {trxs[i + 1]->getHash()}, {}, vdf, node1->getSecretKey()); - dag_blocks.emplace_back(std::make_shared(blk)); + + for (int i = 0; i < 100; ++i) { + vdf_sortition::VdfSortition vdf(sortition_params, node1->getVrfSecretKey(), + VrfSortitionBase::makeVrfInput(proposal_level + 1, period_block_hash), 1, 1); + dev::bytes vdf_msg = DagManager::getVdfMessage(block_hash, {trxs[i]}); + vdf.computeVdfSolution(sortition_params, vdf_msg, false); + DagBlock blk(block_hash, proposal_level + 1, {}, {trxs[i]->getHash()}, estimation, vdf, node1->getSecretKey()); + dag_blocks.emplace_back(std::make_shared(blk)); + } } - for (auto trx : verified_transactions) - 
node1->getTransactionManager()->insertValidatedTransaction(std::move(trx), TransactionStatus::Verified); + for (auto trx : trxs) { + auto tx = trx; + node1->getTransactionManager()->insertValidatedTransaction(std::move(tx), TransactionStatus::Verified); + } for (size_t i = 0; i < dag_blocks.size(); i++) { if (dag_mgr1->verifyBlock(*dag_blocks[i]) == DagManager::VerifyBlockReturnType::Verified) dag_mgr1->addDagBlock(DagBlock(*dag_blocks[i]), {trxs[i]}); } wait({1s, 200ms}, [&](auto& ctx) { WAIT_EXPECT_NE(ctx, dag_mgr1->getDagBlock(block_hash), nullptr) }); - - taraxa::thisThreadSleepForSeconds(1); const auto node1_period = node1->getPbftChain()->getPbftChainSize(); const auto node2_period = node2->getPbftChain()->getPbftChainSize(); std::cout << "node1 period " << node1_period << ", node2 period " << node2_period << std::endl; nw1->getSpecificHandler()->sendBlocks( - nw2->getNodeId(), std::move(dag_blocks), {}, node2_period, node1_period); - + nw2->getNodeId(), std::move(dag_blocks), std::move(trxs), node2_period, node1_period); std::cout << "Waiting Sync ..." << std::endl; - wait({30s, 200ms}, [&](auto& ctx) { WAIT_EXPECT_NE(ctx, dag_mgr2->getDagBlock(block_hash), nullptr) }); + wait({120s, 200ms}, [&](auto& ctx) { WAIT_EXPECT_NE(ctx, dag_mgr2->getDagBlock(block_hash), nullptr) }); } TEST_F(NetworkTest, update_peer_chainsize) { @@ -327,7 +318,7 @@ TEST_F(NetworkTest, transfer_transaction) { nw1->start(); nw2->start(); - EXPECT_HAPPENS({10s, 200ms}, [&](auto& ctx) { + EXPECT_HAPPENS({60s, 100ms}, [&](auto& ctx) { nw1->setPendingPeersToReady(); nw2->setPendingPeersToReady(); WAIT_EXPECT_EQ(ctx, nw1->getPeerCount(), 1) @@ -336,15 +327,18 @@ TEST_F(NetworkTest, transfer_transaction) { auto nw1_nodeid = nw1->getNodeId(); auto nw2_nodeid = nw2->getNodeId(); - EXPECT_NE(nw1->getPeer(nw2_nodeid), nullptr); - EXPECT_NE(nw2->getPeer(nw1_nodeid), nullptr); + + const auto peer2 = nw1->getPeer(nw2_nodeid); + const auto peer1 = nw2->getPeer(nw1_nodeid); + EXPECT_NE(peer2, nullptr); + EXPECT_NE(peer1, nullptr); SharedTransactions transactions; transactions.push_back(g_signed_trx_samples[0]); transactions.push_back(g_signed_trx_samples[1]); transactions.push_back(g_signed_trx_samples[2]); - nw2->getSpecificHandler()->sendTransactions(nw2->getPeer(nw1_nodeid), + nw2->getSpecificHandler()->sendTransactions(peer1, std::move(transactions)); EXPECT_HAPPENS({2s, 200ms}, [&](auto& ctx) { WAIT_EXPECT_EQ(ctx, nw1->getReceivedTransactionsCount(), 3) }); @@ -371,7 +365,7 @@ TEST_F(NetworkTest, save_network) { nw2->start(); nw3->start(); - EXPECT_HAPPENS({120s, 500ms}, [&](auto& ctx) { + EXPECT_HAPPENS({120s, 100ms}, [&](auto& ctx) { nw1->setPendingPeersToReady(); nw2->setPendingPeersToReady(); nw3->setPendingPeersToReady(); @@ -388,7 +382,7 @@ TEST_F(NetworkTest, save_network) { nw2->start(); nw3->start(); - EXPECT_HAPPENS({120s, 500ms}, [&](auto& ctx) { + EXPECT_HAPPENS({120s, 100ms}, [&](auto& ctx) { nw2->setPendingPeersToReady(); nw3->setPendingPeersToReady(); WAIT_EXPECT_EQ(ctx, nw2->getPeerCount(), 1) @@ -612,7 +606,7 @@ TEST_F(NetworkTest, node_pbft_sync) { beneficiary, node1->getSecretKey(), {}); std::vector> votes_for_pbft_blk2; votes_for_pbft_blk2.emplace_back( - node1->getVoteManager()->generateVote(pbft_block2.getBlockHash(), PbftVoteTypes::cert_vote, 2, 2, 3)); + node1->getVoteManager()->generateVoteWithWeight(pbft_block2.getBlockHash(), PbftVoteTypes::cert_vote, 2, 1, 3)); std::cout << "Generate 1 vote for second PBFT block" << std::endl; // node1 put block2 into pbft chain and store into DB // Add cert 
votes in DB @@ -626,7 +620,9 @@ TEST_F(NetworkTest, node_pbft_sync) { period_data2.transactions.push_back(g_signed_trx_samples[3]); db1->savePeriodData(period_data2, batch); - db1->replaceRewardVotes(votes_for_pbft_blk2, batch); + node1->getVoteManager()->addVerifiedVote(votes_for_pbft_blk2[0]); + db1->replaceTwoTPlusOneVotesToBatch(TwoTPlusOneVotedBlockType::CertVotedBlock, votes_for_pbft_blk2, batch); + node1->getVoteManager()->resetRewardVotes(2, 1, 3, pbft_block2.getBlockHash(), batch); // Update pbft chain pbft_chain1->updatePbftChain(pbft_block2.getBlockHash(), pbft_block2.getPivotDagBlockHash()); @@ -878,7 +874,7 @@ TEST_F(NetworkTest, pbft_next_votes_sync_in_same_round) { node2->getPbftManager()->setPbftRound(2); // Node 1 broadcast his votes - node1_pbft_mgr->broadcastSoftAndNextVotes(false); + node1_pbft_mgr->testBroadcatVotesFunctionality(); // Node 2 should receive votes from node 1, node 1 has its own 2 votes EXPECT_EQ(node1_vote_mgr->getVerifiedVotesSize(), 2); EXPECT_HAPPENS({5s, 100ms}, [&](auto& ctx) { WAIT_EXPECT_EQ(ctx, node2_vote_mgr->getVerifiedVotesSize(), 3) }); @@ -1400,7 +1396,7 @@ TEST_F(NetworkTest, suspicious_packets) { TEST_F(NetworkTest, dag_syncing_limit) { network::tarcap::TaraxaPeer peer1, peer2; - const uint64_t dag_sync_limit = 300; + const uint64_t dag_sync_limit = 60; EXPECT_TRUE(peer1.dagSyncingAllowed()); peer1.peer_dag_synced_ = true; diff --git a/tests/p2p_test.cpp b/tests/p2p_test.cpp index 1e9dcf31be..9a215160a7 100644 --- a/tests/p2p_test.cpp +++ b/tests/p2p_test.cpp @@ -311,7 +311,7 @@ TEST_F(P2PTest, multiple_capabilities) { std::filesystem::remove_all("/tmp/nw3"); }; auto wait_for_connection = [](std::shared_ptr nw1, std::shared_ptr nw2) { - EXPECT_HAPPENS({15s, 500ms}, [&](auto &ctx) { + EXPECT_HAPPENS({60s, 100ms}, [&](auto &ctx) { nw1->setPendingPeersToReady(); nw2->setPendingPeersToReady(); WAIT_EXPECT_EQ(ctx, nw1->getPeerCount(), 1) diff --git a/tests/pbft_manager_test.cpp b/tests/pbft_manager_test.cpp index 95f38f2ef0..adcaa4b20f 100644 --- a/tests/pbft_manager_test.cpp +++ b/tests/pbft_manager_test.cpp @@ -192,61 +192,6 @@ struct PbftManagerTest : NodesTest { } }; -// Test that after some amount of elapsed time will not continue soft voting for same value -TEST_F(PbftManagerTest, terminate_soft_voting_pbft_block) { - auto node_cfgs = make_node_cfgs(1, 1, 20); - makeNodesWithNonces(node_cfgs); - - auto pbft_mgr = nodes[0]->getPbftManager(); - auto vote_mgr = nodes[0]->getVoteManager(); - pbft_mgr->stop(); - std::cout << "PBFT manager stopped" << std::endl; - - // Generate bogus votes - auto stale_block_hash = blk_hash_t("0000000100000000000000000000000000000000000000000000000000000000"); - auto propose_vote = vote_mgr->generateVote(stale_block_hash, PbftVoteTypes::propose_vote, 2, 2, 1); - propose_vote->calculateWeight(1, 1, 1); - vote_mgr->addVerifiedVote(propose_vote); - - // uint64_t time_till_stale_ms = 1000; - // std::cout << "Set max wait for soft voted value to " << time_till_stale_ms << "ms..." << std::endl; - // pbft_mgr->setMaxWaitForSoftVotedBlock_ms(time_till_stale_ms); - // pbft_mgr->setMaxWaitForNextVotedBlock_ms(std::numeric_limits::max()); - - auto sleep_time = 1100; - std::cout << "Sleep " << sleep_time << "ms so that last soft voted value of " << stale_block_hash.abridged() - << " becomes stale..." 
<< std::endl; - taraxa::thisThreadSleepForMilliSeconds(sleep_time); - - std::cout << "Initialize PBFT manager at round 2 step 2" << std::endl; - pbft_mgr->setPbftRound(2); - pbft_mgr->setPbftStep(2); - pbft_mgr->resumeSingleState(); - std::cout << "Into cert voted state in round 2..." << std::endl; - EXPECT_EQ(pbft_mgr->getPbftRound(), 2); - EXPECT_EQ(pbft_mgr->getPbftStep(), 3); - - std::cout << "Check did not soft vote for stale soft voted value of " << stale_block_hash.abridged() << "..." - << std::endl; - bool skipped_soft_voting = true; - auto votes = vote_mgr->getVerifiedVotes(); - for (const auto &v : votes) { - if (PbftVoteTypes::soft_vote == v->getType()) { - if (v->getBlockHash() == stale_block_hash) { - skipped_soft_voting = false; - } - std::cout << "Found soft voted value of " << v->getBlockHash().abridged() << " in round 2" << std::endl; - } - } - EXPECT_EQ(skipped_soft_voting, true); - - auto start_round = pbft_mgr->getPbftRound(); - pbft_mgr->resume(); - - std::cout << "Wait ensure node is still advancing in rounds... " << std::endl; - EXPECT_HAPPENS({60s, 50ms}, [&](auto &ctx) { WAIT_EXPECT_NE(ctx, start_round, pbft_mgr->getPbftRound()) }); -} - // Test that after some amount of elapsed time will give up on the next voting value if corresponding DAG blocks can't // be found @@ -870,54 +815,6 @@ TEST_F(PbftManagerWithDagCreation, produce_overweighted_block) { EXPECT_FALSE(node->getPbftManager()->checkBlockWeight(period_data.dag_blocks)); } -TEST_F(PbftManagerWithDagCreation, DISABLED_pbft_block_is_overweighted) { - auto node_cfgs = make_node_cfgs(1, 5, true); - node_cfgs.front().genesis.dag.gas_limit = 500000; - node_cfgs.front().genesis.pbft.gas_limit = 600000; - makeNode(); - deployContract(); - node->getDagBlockProposer()->stop(); - generateAndApplyInitialDag(); - - EXPECT_HAPPENS({10s, 500ms}, - [&](auto &ctx) { WAIT_EXPECT_EQ(ctx, nonce, node->getDB()->getNumTransactionExecuted() + 1); }); - - node->getPbftManager()->stop(); - // create pbft block - auto chain_size_before = node->getPbftChain()->getPbftChainSize(); - { - auto blocks_with_txs = generateDagBlocks(10, 3, 1); - insertBlocks(blocks_with_txs); - auto dag_block_hash = blocks_with_txs.back().blk.getHash(); - - // get DAG block and transaction order - const auto propose_period = node->getPbftChain()->getPbftChainSize() + 1; - auto dag_block_order = node->getDagManager()->getDagBlockOrder(dag_block_hash, propose_period); - ASSERT_TRUE(!dag_block_order.empty()); - - std::vector trx_hashes; - for (const auto &bt : blocks_with_txs) { - std::transform(bt.trxs.begin(), bt.trxs.end(), std::back_inserter(trx_hashes), - [](const auto &t) { return t->getHash(); }); - } - auto order_hash = node->getPbftManager()->calculateOrderHash(dag_block_order); - - const auto &last_hash = node->getPbftChain()->getLastPbftBlockHash(); - auto reward_votes = node->getDB()->getRewardVotes(); - std::vector reward_votes_hashes; - std::transform(reward_votes.begin(), reward_votes.end(), std::back_inserter(reward_votes_hashes), - [](const auto &v) { return v->getHash(); }); - const auto pbft_block = - std::make_shared(last_hash, dag_block_hash, order_hash, kNullBlockHash, propose_period, - node->getAddress(), node->getSecretKey(), std::move(reward_votes_hashes)); - // node->getPbftChain()->pushUnverifiedPbftBlock(pbft_block); - } - - EXPECT_HAPPENS({60s, 500ms}, [&](auto &ctx) { - WAIT_EXPECT_EQ(ctx, node->getPbftChain()->getPbftChainSize(), chain_size_before + 1); - }); -} - TEST_F(PbftManagerWithDagCreation, proposed_blocks) { auto db = 
std::make_shared(data_dir); ProposedBlocks proposed_blocks(db); diff --git a/tests/rewards_stats_test.cpp b/tests/rewards_stats_test.cpp new file mode 100644 index 0000000000..cce156c735 --- /dev/null +++ b/tests/rewards_stats_test.cpp @@ -0,0 +1,185 @@ +#include "rewards/rewards_stats.hpp" + +#include +#include +#include +#include + +#include "test_util/gtest.hpp" +#include "test_util/samples.hpp" + +namespace taraxa::core_tests { + +auto g_secret = dev::Secret("3800b2875669d9b2053c1aff9224ecfdc411423aac5b5a73d7a45ced1c3b9dcd", + dev::Secret::ConstructFromStringType::FromHex); +auto g_key_pair = Lazy([] { return dev::KeyPair(g_secret); }); + +struct RewardsStatsTest : NodesTest {}; + +class TestableRewardsStats : public rewards::Stats { + public: + TestableRewardsStats(const Hardforks::RewardsDistributionMap& rdm, std::shared_ptr db) + : rewards::Stats(100, rdm, db, [](auto) { return 100; }) {} + std::vector getStats() { return blocks_stats_; } +}; + +class TestableBlockStats : public rewards::BlockStats { + public: + const addr_t& getAuthor() const { return block_author_; } +}; + +TEST_F(RewardsStatsTest, defaultDistribution) { + auto db = std::make_shared(data_dir / "db"); + + std::vector> empty_votes; + auto rewards_stats = TestableRewardsStats({}, db); + + for (auto i = 1; i < 5; ++i) { + PeriodData block(make_simple_pbft_block(blk_hash_t(i), i), empty_votes); + auto stats = rewards_stats.processStats(block); + ASSERT_EQ(stats.size(), 1); + ASSERT_TRUE(rewards_stats.getStats().empty()); + } +} + +TEST_F(RewardsStatsTest, statsSaving) { + auto db = std::make_shared(data_dir / "db"); + + // distribute every 5 blocks + Hardforks::RewardsDistributionMap distribution{{0, 5}}; + + std::vector> empty_votes; + std::vector block_authors; + { + auto rewards_stats = TestableRewardsStats(distribution, db); + + for (auto i = 1; i < 5; ++i) { + auto kp = dev::KeyPair::create(); + block_authors.push_back(kp.address()); + + PeriodData block(make_simple_pbft_block(blk_hash_t(i), i, kp.secret()), empty_votes); + auto stats = rewards_stats.processStats(block); + ASSERT_EQ(rewards_stats.getStats().size(), block_authors.size()); + ASSERT_TRUE(stats.empty()); + } + } + { + // Load from db + auto rewards_stats = TestableRewardsStats(distribution, db); + auto stats = rewards_stats.getStats(); + ASSERT_EQ(rewards_stats.getStats().size(), block_authors.size()); + + for (size_t i = 0; i < stats.size(); ++i) { + auto stats_with_get = reinterpret_cast(&stats[i]); + ASSERT_EQ(stats_with_get->getAuthor(), block_authors[i]); + } + } +} + +TEST_F(RewardsStatsTest, statsCleaning) { + auto db = std::make_shared(data_dir / "db"); + + // distribute every 5 blocks + Hardforks::RewardsDistributionMap distribution{{0, 5}}; + + std::vector> empty_votes; + std::vector block_authors; + { + auto rewards_stats = TestableRewardsStats(distribution, db); + + for (auto i = 1; i < 5; ++i) { + auto kp = dev::KeyPair::create(); + block_authors.push_back(kp.address()); + + PeriodData block(make_simple_pbft_block(blk_hash_t(i), i, kp.secret()), empty_votes); + auto stats = rewards_stats.processStats(block); + ASSERT_EQ(rewards_stats.getStats().size(), block_authors.size()); + ASSERT_TRUE(stats.empty()); + } + + // Process block 5 after which we should have no stats elements in db + PeriodData block(make_simple_pbft_block(blk_hash_t(5), 5), empty_votes); + rewards_stats.processStats(block); + } + + // Load from db + auto rewards_stats = TestableRewardsStats(distribution, db); + ASSERT_TRUE(rewards_stats.getStats().empty()); +} + 
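+// A note on the RewardsDistributionMap shared by these tests: each key is the
+// period at which an interval becomes active and each value is the interval
+// itself, so {{0, 5}} means "from period 0 on, distribute once every 5
+// blocks". A minimal sketch of the flow the tests here rely on (period
+// numbers illustrative):
+//
+//   auto rewards_stats = TestableRewardsStats({{0, 5}}, db);
+//   rewards_stats.processStats(block_1);  // ... through block_4: buffered in db, returns empty
+//   rewards_stats.processStats(block_5);  // interval boundary: returns all 5 stats, clears db
+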
+TEST_F(RewardsStatsTest, statsProcessing) { + auto db = std::make_shared(data_dir / "db"); + // distribute every 10 blocks + auto rewards_stats = TestableRewardsStats({{0, 10}}, db); + + std::vector> empty_votes; + std::vector block_authors; + + // make blocks [1,9] and process them. output of processStats should be empty + for (auto i = 1; i < 10; ++i) { + auto kp = dev::KeyPair::create(); + block_authors.push_back(kp.address()); + + PeriodData block(make_simple_pbft_block(blk_hash_t(i), i, kp.secret()), empty_votes); + auto stats = rewards_stats.processStats(block); + ASSERT_TRUE(stats.empty()); + ASSERT_EQ(rewards_stats.getStats().size(), block_authors.size()); + } + + auto kp = dev::KeyPair::create(); + block_authors.push_back(kp.address()); + + PeriodData block(make_simple_pbft_block(blk_hash_t(10), 10, kp.secret()), empty_votes); + auto stats = rewards_stats.processStats(block); + ASSERT_EQ(stats.size(), block_authors.size()); + + for (size_t i = 0; i < stats.size(); ++i) { + auto stats_with_get = reinterpret_cast(&stats[i]); + ASSERT_EQ(stats_with_get->getAuthor(), block_authors[i]); + } + ASSERT_TRUE(rewards_stats.getStats().empty()); +} + +TEST_F(RewardsStatsTest, distributionChange) { + auto db = std::make_shared(data_dir / "db"); + + Hardforks::RewardsDistributionMap distribution{{6, 5}, {11, 2}}; + + auto rewards_stats = TestableRewardsStats(distribution, db); + + std::vector> empty_votes; + uint64_t period = 1; + for (; period <= 5; ++period) { + PeriodData block(make_simple_pbft_block(blk_hash_t(period), period), empty_votes); + auto stats = rewards_stats.processStats(block); + ASSERT_FALSE(stats.empty()); + } + { + // make blocks [1,9] and process them. output of processStats should be empty + for (; period < 10; ++period) { + PeriodData block(make_simple_pbft_block(blk_hash_t(period), period), empty_votes); + auto stats = rewards_stats.processStats(block); + ASSERT_TRUE(stats.empty()); + } + PeriodData block(make_simple_pbft_block(blk_hash_t(period), period), empty_votes); + auto stats = rewards_stats.processStats(block); + } + + PeriodData block(make_simple_pbft_block(blk_hash_t(period), period), empty_votes); + auto stats = rewards_stats.processStats(block); +} + +} // namespace taraxa::core_tests + +using namespace taraxa; +int main(int argc, char** argv) { + taraxa::static_init(); + + auto logging = logger::createDefaultLoggingConfig(); + logging.verbosity = logger::Verbosity::Error; + addr_t node_addr; + logging.InitLogging(node_addr); + + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} \ No newline at end of file diff --git a/tests/rpc_test.cpp b/tests/rpc_test.cpp index 3675644792..55157b691a 100644 --- a/tests/rpc_test.cpp +++ b/tests/rpc_test.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include "network/rpc/eth/Eth.h" #include "test_util/gtest.hpp" @@ -20,13 +21,20 @@ TEST_F(RPCTest, eth_estimateGas) { auto eth_json_rpc = net::rpc::eth::NewEth(std::move(eth_rpc_params)); const auto from = dev::toHex(dev::toAddress(node_cfg.front().node_secret)); + auto check_estimation_is_in_range = [&](const Json::Value& trx, const std::string& e) { + auto estimate = dev::jsToInt(eth_json_rpc->eth_estimateGas(trx)); + auto expected = dev::jsToInt(e); + EXPECT_GE(estimate, expected); + EXPECT_GE(expected / 20, estimate - expected); + }; + // Contract creation estimations with author + without author { Json::Value trx(Json::objectValue); trx["data"] = samples::greeter_contract_code; - EXPECT_EQ(eth_json_rpc->eth_estimateGas(trx), "0x5ccc5"); + 
check_estimation_is_in_range(trx, "0x5ccc5"); trx["from"] = from; - EXPECT_EQ(eth_json_rpc->eth_estimateGas(trx), "0x5ccc5"); + check_estimation_is_in_range(trx, "0x5ccc5"); } // Contract creation with value @@ -34,7 +42,7 @@ TEST_F(RPCTest, eth_estimateGas) { Json::Value trx(Json::objectValue); trx["value"] = 1; trx["data"] = samples::greeter_contract_code; - EXPECT_EQ(eth_json_rpc->eth_estimateGas(trx), "0x5ccc5"); + check_estimation_is_in_range(trx, "0x5ccc5"); } // Simple transfer estimations with author + without author @@ -42,9 +50,9 @@ TEST_F(RPCTest, eth_estimateGas) { Json::Value trx(Json::objectValue); trx["value"] = 1; trx["to"] = dev::toHex(addr_t::random()); - EXPECT_EQ(eth_json_rpc->eth_estimateGas(trx), "0x5208"); // 21k + check_estimation_is_in_range(trx, "0x5208"); // 21k trx["from"] = from; - EXPECT_EQ(eth_json_rpc->eth_estimateGas(trx), "0x5208"); // 21k + check_estimation_is_in_range(trx, "0x5208"); // 21k } // Test throw on failed transaction @@ -57,13 +65,6 @@ TEST_F(RPCTest, eth_estimateGas) { } } -#define EXPECT_THROW_WITH(statement, expected_exception, msg) \ - try { \ - statement; \ - } catch (const expected_exception& e) { \ - ASSERT_EQ(std::string(msg), std::string(e.what())); \ - } - TEST_F(RPCTest, eth_call) { auto node_cfg = make_node_cfgs(1); auto nodes = launch_nodes(node_cfg); @@ -232,6 +233,27 @@ TEST_F(RPCTest, eth_getBlock) { EXPECT_EQ(4, dev::jsToU256(block["number"].asString())); EXPECT_GT(dev::jsToU256(block["totalReward"].asString()), 0); } + +TEST_F(RPCTest, eip_1898) { + auto node_cfg = make_node_cfgs(1); + auto nodes = launch_nodes(node_cfg); + net::rpc::eth::EthParams eth_rpc_params; + eth_rpc_params.chain_id = node_cfg.front().genesis.chain_id; + eth_rpc_params.gas_limit = node_cfg.front().genesis.dag.gas_limit; + eth_rpc_params.final_chain = nodes.front()->getFinalChain(); + auto eth_json_rpc = net::rpc::eth::NewEth(std::move(eth_rpc_params)); + + const auto from = dev::toHex(dev::toAddress(node_cfg.front().node_secret)); + + Json::Value zero_block(Json::objectValue); + zero_block["blockNumber"] = dev::toJS(0); + EXPECT_EQ(eth_json_rpc->eth_getBalance(from, "0x0"), eth_json_rpc->eth_getBalance(from, zero_block)); + + Json::Value genesis_block(Json::objectValue); + genesis_block["blockHash"] = dev::toJS(*nodes.front()->getFinalChain()->block_hash(0)); + EXPECT_EQ(eth_json_rpc->eth_getBalance(from, "0x0"), eth_json_rpc->eth_getBalance(from, genesis_block)); +} + } // namespace taraxa::core_tests using namespace taraxa; diff --git a/tests/state_api_test.cpp b/tests/state_api_test.cpp index 6c49267725..9d27f86220 100644 --- a/tests/state_api_test.cpp +++ b/tests/state_api_test.cpp @@ -25,9 +25,8 @@ struct TestBlock { h256 state_root; EVMBlock evm_block; vector transactions; - vector uncle_blocks; - RLP_FIELDS_DEFINE_INPLACE(hash, state_root, evm_block, transactions, uncle_blocks) + RLP_FIELDS_DEFINE_INPLACE(hash, state_root, evm_block, transactions) }; template @@ -59,7 +58,6 @@ TEST_F(StateAPITest, DISABLED_dpos_integration) { // dpos_cfg.eligibility_balance_threshold = 1000; // dpos_cfg.vote_eligibility_balance_step = 1000; // addr_1_bal_expected -= dpos_cfg.genesis_state[make_addr(1)][make_addr(1)] = dpos_cfg.eligibility_balance_threshold; - // chain_cfg.hardforks.fix_genesis_fork_block = 0; // uint64_t curr_blk = 0; // StateAPI SUT([&](auto /*n*/) -> h256 { assert(false); }, // @@ -203,8 +201,7 @@ TEST_F(StateAPITest, DISABLED_eth_mainnet_smoke) { progress_pct_log_threshold += 10; } auto const& test_block = test_blocks[blk_num]; - auto const& 
result = - SUT.transition_state(test_block.evm_block, test_block.transactions, {}, test_block.uncle_blocks); + auto const& result = SUT.transition_state(test_block.evm_block, test_block.transactions); ASSERT_EQ(result.state_root, test_block.state_root); SUT.transition_state_commit(); } diff --git a/tests/test_util/include/test_util/test_util.hpp b/tests/test_util/include/test_util/test_util.hpp index ab6a4b3731..951a22dec3 100644 --- a/tests/test_util/include/test_util/test_util.hpp +++ b/tests/test_util/include/test_util/test_util.hpp @@ -122,6 +122,14 @@ bool wait(const wait_opts& opts, const std::function& poller); EXPECT_GE(o1, o2); \ } +#define EXPECT_THROW_WITH(statement, expected_exception, msg) \ + try { \ + statement; \ + EXPECT_TRUE("No exception thrown" && false); \ + } catch (const expected_exception& e) { \ + EXPECT_EQ(std::string(msg), std::string(e.what())); \ + } + struct TransactionClient { enum class TransactionStage { created, @@ -161,7 +169,7 @@ state_api::BalanceMap effective_initial_balances(const state_api::Config& cfg); u256 own_effective_genesis_bal(const FullNodeConfig& cfg); std::shared_ptr make_simple_pbft_block(const h256& hash, uint64_t period, - const h256& anchor_hash = kNullBlockHash); + const secret_t& pk = secret_t::random()); std::vector getOrderedDagBlocks(const std::shared_ptr& db); diff --git a/tests/test_util/src/test_util.cpp b/tests/test_util/src/test_util.cpp index 6599c4ea84..7c13b18e86 100644 --- a/tests/test_util/src/test_util.cpp +++ b/tests/test_util/src/test_util.cpp @@ -96,10 +96,10 @@ u256 own_effective_genesis_bal(const FullNodeConfig& cfg) { return effective_initial_balances(cfg.genesis.state)[dev::toAddress(dev::Secret(cfg.node_secret))]; } -std::shared_ptr make_simple_pbft_block(const h256& hash, uint64_t period, const h256& anchor_hash) { +std::shared_ptr make_simple_pbft_block(const h256& hash, uint64_t period, const secret_t& pk) { std::vector reward_votes_hashes; - return std::make_shared(hash, anchor_hash, kNullBlockHash, kNullBlockHash, period, addr_t(0), - secret_t::random(), std::move(reward_votes_hashes)); + return std::make_shared(hash, kNullBlockHash, kNullBlockHash, kNullBlockHash, period, addr_t(0), pk, + std::move(reward_votes_hashes)); } std::vector getOrderedDagBlocks(const std::shared_ptr& db) { @@ -302,7 +302,7 @@ std::vector NodesTest::make_node_cfgs(size_t total_count bool NodesTest::wait_connect(const std::vector>& nodes) { auto num_peers_connected = nodes.size() - 1; - return wait({30s, 1s}, [&](auto& ctx) { + return wait({60s, 100ms}, [&](auto& ctx) { for (const auto& node : nodes) { if (ctx.fail_if(node->getNetwork()->getPeerCount() < num_peers_connected)) { return; diff --git a/tests/transaction_test.cpp b/tests/transaction_test.cpp index 061bde4c2a..53637d89a0 100644 --- a/tests/transaction_test.cpp +++ b/tests/transaction_test.cpp @@ -76,7 +76,7 @@ TEST_F(TransactionTest, sig) { ASSERT_THROW(Transaction(dev::jsToBytes("0xf84980808080808024a01404adc97c8b58fef303b2862d0e72378" "4fb635e7237e0e8d3ea33bbea19c36ca0229e80d57ba91a0f347686" "30fd21ad86e4c403b307de9ac4550d0ccc81c90fe3")), - Transaction::InvalidSignature); + Transaction::InvalidFormat); std::vector> valid_cases{ {0, "0xf647d1d47ce927ce2fb9f57e4e2a3c32b037c5e544b44611077f5cc6980b0bc2"}, {1, "0x49c1cb845df5d3ed238ca37ad25ca96f417e4f22d7911224cf3c2a725985e7ff"}, @@ -112,7 +112,7 @@ TEST_F(TransactionTest, sig) { } } ASSERT_NE(Transaction(with_modified_payload.out()).getSender(), sender); - ASSERT_THROW(Transaction(with_invalid_signature.out()).getSender(), 
Transaction::InvalidSignature); + ASSERT_THROW(Transaction(with_invalid_signature.out()).getSender(), Transaction::InvalidTransaction); } } } @@ -211,8 +211,6 @@ TEST_F(TransactionTest, transaction_low_nonce) { SharedTransactions trxs{trx_1, trx_2}; period_data.transactions = trxs; auto batch = db->createWriteBatch(); - db->saveTransactionPeriod(trx_1->getHash(), 1, 0); - db->saveTransactionPeriod(trx_2->getHash(), 1, 0); db->savePeriodData(period_data, batch); db->commitWriteBatch(batch); final_chain->finalize(std::move(period_data), {dag_blk.getHash()}).get(); diff --git a/tests/vote_test.cpp b/tests/vote_test.cpp index 20cd913912..b6c8dab36e 100644 --- a/tests/vote_test.cpp +++ b/tests/vote_test.cpp @@ -58,30 +58,29 @@ TEST_F(VoteTest, verified_votes) { TEST_F(VoteTest, round_determine_from_next_votes) { auto node = create_nodes(1, true /*start*/).front(); - // stop PBFT manager, that will place vote - node->getPbftManager()->stop(); + auto pbft_mgr = node->getPbftManager(); + auto vote_mgr = node->getVoteManager(); + // stop PBFT manager, that will place vote + pbft_mgr->stop(); clearAllVotes({node}); - auto vote_mgr = node->getVoteManager(); - size_t two_t_plus_one = 2; + const auto [current_round, current_period] = pbft_mgr->getPbftRoundAndPeriod(); - // Generate votes in 3 rounds, 2 steps, each step have 3 votes + // Generate votes for a few future rounds blk_hash_t voted_block_hash(1); PbftVoteTypes type = PbftVoteTypes::next_vote; - for (int i = 10; i <= 12; i++) { - for (int j = 4; j <= 5; j++) { - PbftPeriod period = i; - PbftRound round = i; - PbftStep step = j; - auto vote = vote_mgr->generateVote(voted_block_hash, type, period, round, step); - vote->calculateWeight(3, 3, 3); - vote_mgr->addVerifiedVote(vote); - } + const PbftRound kMaxRound = current_round + 3; + PbftStep step = 5; + for (PbftRound round = current_round; round <= kMaxRound; round++) { + auto vote = vote_mgr->generateVote(voted_block_hash, type, current_period, round, step); + vote->calculateWeight(3, 3, 3); + vote_mgr->addVerifiedVote(vote); } - auto new_round = vote_mgr->determineNewRound(12, two_t_plus_one); - EXPECT_EQ(*new_round, 13); + auto new_round = vote_mgr->determineNewRound(current_period, kMaxRound); + EXPECT_EQ(new_round.has_value(), true); + EXPECT_EQ(*new_round, kMaxRound + 1); } TEST_F(VoteTest, reconstruct_votes) { @@ -168,7 +167,6 @@ TEST_F(VoteTest, vote_broadcast) { WAIT_EXPECT_EQ(ctx, vote_mgr2->getVerifiedVotesSize(), 1) WAIT_EXPECT_EQ(ctx, vote_mgr3->getVerifiedVotesSize(), 1) }); - EXPECT_EQ(vote_mgr1->getVerifiedVotesSize(), 0); } TEST_F(VoteTest, two_t_plus_one_votes) {
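// Roughly speaking, two_t_plus_one is the PBFT quorum weight: with total
// eligible vote weight 3t+1 and at most t of it faulty, a step needs strictly
// more than two thirds of the weight (2t+1) to vote a value or advance a round.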