diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index c4ba85ae46e..b26a20f569b 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -96,7 +96,7 @@ steps: echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -LE long_running_tests --output-on-failure + ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -j8 -LE _tests --output-on-failure retry: automatic: limit: 1 @@ -108,6 +108,26 @@ steps: - "build/genesis.json" - "build/config.ini" timeout: 60 + + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Build" && \ + tar -zxf build.tar.gz && \ + echo "--- :m: Starting MongoDB" && \ + $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running tests" && \ + ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure + retry: + automatic: + limit: 1 + label: ":darwin: NP Tests" + agents: + - "role=macos-tester" + artifact_paths: + - "mongod.log" + - "build/genesis.json" + - "build/config.ini" + timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ @@ -116,7 +136,7 @@ steps: echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure + cd /data/job/build && ctest -j8 -LE _tests --output-on-failure retry: automatic: limit: 1 @@ -132,6 +152,30 @@ steps: image: "eosio/ci:ubuntu" workdir: /data/job timeout: 60 + + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Build" && \ + tar -zxf build.tar.gz && \ + echo "--- :m: Starting MongoDB" && \ + $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running tests" && \ + cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure + retry: + automatic: + limit: 1 + label: ":ubuntu: NP Tests" + agents: + - "role=linux-tester" + artifact_paths: + - "mongod.log" + - "build/genesis.json" + - "build/config.ini" + plugins: + docker#v1.4.0: + image: "eosio/ci:ubuntu" + workdir: /data/job + timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ @@ -140,7 +184,7 @@ steps: echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure + cd /data/job/build && ctest -j8 -LE _tests --output-on-failure retry: automatic: limit: 1 @@ -157,6 +201,30 @@ steps: workdir: /data/job timeout: 60 + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: 18.04 Build" && \ + tar -zxf build.tar.gz && \ + echo "--- :m: Starting MongoDB" && \ + $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running tests" && \ + cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure + retry: + automatic: + limit: 1 + label: ":ubuntu: 18.04 NP Tests" + agents: + - "role=linux-tester" + artifact_paths: + - "mongod.log" + - "build/genesis.json" + - "build/config.ini" + plugins: + docker#v1.4.0: + image: "eosio/ci:ubuntu18" + workdir: /data/job + timeout: 60 + - command: | echo "--- :arrow_down: Downloading build directory" && \ buildkite-agent artifact download "build.tar.gz" . --step ":fedora: Build" && \ @@ -164,7 +232,7 @@ steps: echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure + cd /data/job/build && ctest -j8 -LE _tests --output-on-failure retry: automatic: limit: 1 @@ -181,6 +249,30 @@ steps: workdir: /data/job timeout: 60 + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":fedora: Build" && \ + tar -zxf build.tar.gz && \ + echo "--- :m: Starting MongoDB" && \ + $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running tests" && \ + cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure + retry: + automatic: + limit: 1 + label: ":fedora: NP Tests" + agents: + - "role=linux-tester" + artifact_paths: + - "mongod.log" + - "build/genesis.json" + - "build/config.ini" + plugins: + docker#v1.4.0: + image: "eosio/ci:fedora" + workdir: /data/job + timeout: 60 + - command: | echo "--- :arrow_down: Downloading build directory" && \ buildkite-agent artifact download "build.tar.gz" . --step ":centos: Build" && \ @@ -188,7 +280,7 @@ steps: echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure + cd /data/job/build && ctest -j8 -LE _tests --output-on-failure retry: automatic: limit: 1 @@ -205,6 +297,30 @@ steps: workdir: /data/job timeout: 60 + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":centos: Build" && \ + tar -zxf build.tar.gz && \ + echo "--- :m: Starting MongoDB" && \ + $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running tests" && \ + cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure + retry: + automatic: + limit: 1 + label: ":centos: NP Tests" + agents: + - "role=linux-tester" + artifact_paths: + - "mongod.log" + - "build/genesis.json" + - "build/config.ini" + plugins: + docker#v1.4.0: + image: "eosio/ci:centos" + workdir: /data/job + timeout: 60 + - command: | echo "--- :arrow_down: Downloading build directory" && \ buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: Build" && \ @@ -212,7 +328,7 @@ steps: echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure + cd /data/job/build && ctest -j8 -LE _tests --output-on-failure retry: automatic: limit: 1 @@ -228,3 +344,27 @@ steps: image: "eosio/ci:amazonlinux" workdir: /data/job timeout: 60 + + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":aws: Build" && \ + tar -zxf build.tar.gz && \ + echo "--- :m: Starting MongoDB" && \ + $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running tests" && \ + cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure + retry: + automatic: + limit: 1 + label: ":aws: NP Tests" + agents: + - "role=linux-tester" + artifact_paths: + - "mongod.log" + - "build/genesis.json" + - "build/config.ini" + plugins: + docker#v1.4.0: + image: "eosio/ci:amazonlinux" + workdir: /data/job + timeout: 60 diff --git a/.gitmodules b/.gitmodules index 16559d89417..7d0f8a37f7b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -27,3 +27,6 @@ [submodule "libraries/fc"] path = libraries/fc url = https://github.com/EOSIO/fc +[submodule "libraries/wabt"] + path = libraries/wabt + url = http://github.com/EOSIO/wabt diff --git a/CMakeLists.txt b/CMakeLists.txt index 52ae84745ab..376cdf972ea 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -26,8 +26,8 @@ set( CMAKE_CXX_EXTENSIONS ON ) set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) -set(VERSION_MINOR 2) -set(VERSION_PATCH 6) +set(VERSION_MINOR 3) +set(VERSION_PATCH 0) set( CLI_CLIENT_EXECUTABLE_NAME cleos ) set( NODE_EXECUTABLE_NAME nodeos ) @@ -74,8 +74,6 @@ if ("${OPENSSL_ROOT_DIR}" STREQUAL "") endif() endif() -find_package(Secp256k1 REQUIRED) - if(UNIX) if(APPLE) set(whole_archive_flag "-force_load") diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index 0827b00d700..d2ca3afdf41 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -50,6 +50,7 @@ endif() find_library(libbinaryen binaryen @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libwasm WASM @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libwast WAST @CMAKE_INSTALL_FULL_LIBDIR@) +find_library(libwabt wabt @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libir IR @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libplatform Platform @CMAKE_INSTALL_FULL_LIBDIR@) find_library(liblogging Logging @CMAKE_INSTALL_FULL_LIBDIR@) @@ -59,7 +60,13 @@ find_library(liboscrypto crypto @OPENSSL_ROOT_DIR@/lib) find_library(libosssl ssl @OPENSSL_ROOT_DIR@/lib) find_library(libchainbase chainbase @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libbuiltins builtins @CMAKE_INSTALL_FULL_LIBDIR@) -find_library(libsecp256k1 secp256k1 @Secp256k1_ROOT_DIR@/lib) +find_library(libsecp256k1 secp256k1 @CMAKE_INSTALL_FULL_LIBDIR@) +find_library(GMP_LIBRARIES NAMES libgmp.a gmp.lib gmp libgmp-10 mpir + HINTS ENV GMP_LIB_DIR + ENV GMP_DIR + PATH_SUFFIXES lib + DOC "Path to the GMP library" +) macro(add_eosio_test test_name) add_executable( ${test_name} ${ARGN} ) @@ -71,6 +78,7 @@ macro(add_eosio_test test_name) ${libbinaryen} ${libwast} ${libwasm} + ${libwabt} ${libruntime} ${libplatform} ${libir} @@ -80,6 +88,7 @@ macro(add_eosio_test test_name) ${liblogging} ${libchainbase} ${libbuiltins} + ${GMP_LIBRARIES} ${libsecp256k1} LLVMX86Disassembler diff --git 
a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index e4243aff86f..fecd6c081ca 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -51,6 +51,7 @@ find_library(libbinaryen binaryen @CMAKE_BINARY_DIR@/externals/binaryen/lib) find_library(libwasm WASM @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/WASM) find_library(libwast WAST @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/WAST) find_library(libir IR @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/IR) +find_library(libwabt wabt @CMAKE_BINARY_DIR@/libraries/wabt) find_library(libplatform Platform @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/Platform) find_library(liblogging Logging @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/Logging) find_library(libruntime Runtime @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/Runtime) @@ -59,7 +60,13 @@ find_library(liboscrypto crypto @OPENSSL_ROOT_DIR@/lib) find_library(libosssl ssl @OPENSSL_ROOT_DIR@/lib) find_library(libchainbase chainbase @CMAKE_BINARY_DIR@/libraries/chainbase) find_library(libbuiltins builtins @CMAKE_BINARY_DIR@/libraries/builtins) -find_library(libsecp256k1 secp256k1 @Secp256k1_ROOT_DIR@/lib) +find_library(libsecp256k1 secp256k1 @CMAKE_BINARY_DIR@/libraries/fc/secp256k1) +find_library(GMP_LIBRARIES NAMES libgmp.a gmp.lib gmp libgmp-10 mpir + HINTS ENV GMP_LIB_DIR + ENV GMP_DIR + PATH_SUFFIXES lib + DOC "Path to the GMP library" +) macro(add_eosio_test test_name) add_executable( ${test_name} ${ARGN} ) @@ -71,6 +78,7 @@ macro(add_eosio_test test_name) ${libbinaryen} ${libwast} ${libwasm} + ${libwabt} ${libruntime} ${libplatform} ${libir} @@ -80,6 +88,7 @@ macro(add_eosio_test test_name) ${liblogging} ${libchainbase} ${libbuiltins} + ${GMP_LIBRARIES} ${libsecp256k1} LLVMX86Disassembler diff --git a/Docker/Dockerfile b/Docker/Dockerfile index 92867dae89c..24dd447ed75 100644 --- a/Docker/Dockerfile +++ b/Docker/Dockerfile @@ -5,7 +5,7 @@ ARG symbol=SYS RUN git clone -b $branch https://github.com/EOSIO/eos.git --recursive \ && cd eos && echo "$branch:$(git rev-parse HEAD)" > /etc/eosio-version \ && cmake -H. -B"/tmp/build" -GNinja -DCMAKE_BUILD_TYPE=Release -DWASM_ROOT=/opt/wasm -DCMAKE_CXX_COMPILER=clang++ \ - -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/tmp/build -DSecp256k1_ROOT_DIR=/usr/local -DBUILD_MONGO_DB_PLUGIN=true -DCORE_SYMBOL_NAME=$symbol \ + -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/tmp/build -DBUILD_MONGO_DB_PLUGIN=true -DCORE_SYMBOL_NAME=$symbol \ && cmake --build /tmp/build --target install && rm /tmp/build/bin/eosiocpp @@ -21,5 +21,4 @@ COPY --from=builder /eos/Docker/nodeosd.sh /opt/eosio/bin/nodeosd.sh ENV EOSIO_ROOT=/opt/eosio RUN chmod +x /opt/eosio/bin/nodeosd.sh ENV LD_LIBRARY_PATH /usr/local/lib -VOLUME /opt/eosio/bin/data-dir ENV PATH /opt/eosio/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin diff --git a/Docker/README.md b/Docker/README.md index c94c61f7ccc..77c3fa4cfbd 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -20,10 +20,10 @@ cd eos/Docker docker build . -t eosio/eos ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.2.6 tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. 
For example, if you wished to generate a docker image based off of the v1.3.0 tag, you could do the following: ```bash -docker build -t eosio/eos:v1.2.6 --build-arg branch=v1.2.6 . +docker build -t eosio/eos:v1.3.0 --build-arg branch=v1.3.0 . ``` By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image. diff --git a/Docker/builder/Dockerfile b/Docker/builder/Dockerfile index ddb281099e7..cac09937cd0 100644 --- a/Docker/builder/Dockerfile +++ b/Docker/builder/Dockerfile @@ -50,13 +50,6 @@ RUN git clone --depth 1 --single-branch --branch release_40 https://github.com/l && cmake --build build --target install \ && cd .. && rm -rf llvm -RUN git clone --depth 1 https://github.com/cryptonomex/secp256k1-zkp \ - && cd secp256k1-zkp \ - && ./autogen.sh \ - && ./configure --prefix=/usr/local \ - && make -j$(nproc) install \ - && cd .. && rm -rf secp256k1-zkp - RUN git clone --depth 1 -b releases/v3.3 https://github.com/mongodb/mongo-cxx-driver \ && cd mongo-cxx-driver/build \ && cmake -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local .. \ diff --git a/Docker/dev/Dockerfile b/Docker/dev/Dockerfile index 2b7bf84987f..f2dea74ac6c 100644 --- a/Docker/dev/Dockerfile +++ b/Docker/dev/Dockerfile @@ -5,7 +5,7 @@ ARG symbol=SYS RUN git clone -b $branch https://github.com/EOSIO/eos.git --recursive \ && cd eos && echo "$branch:$(git rev-parse HEAD)" > /etc/eosio-version \ && cmake -H. -B"/opt/eosio" -GNinja -DCMAKE_BUILD_TYPE=Release -DWASM_ROOT=/opt/wasm -DCMAKE_CXX_COMPILER=clang++ \ - -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/opt/eosio -DSecp256k1_ROOT_DIR=/usr/local -DBUILD_MONGO_DB_PLUGIN=true -DCORE_SYMBOL_NAME=$symbol \ + -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/opt/eosio -DBUILD_MONGO_DB_PLUGIN=true -DCORE_SYMBOL_NAME=$symbol \ && cmake --build /opt/eosio --target install \ && cp /eos/Docker/config.ini / && ln -s /opt/eosio/contracts /contracts && cp /eos/Docker/nodeosd.sh /opt/eosio/bin/nodeosd.sh && ln -s /eos/tutorials /tutorials @@ -14,5 +14,4 @@ RUN pip3 install numpy ENV EOSIO_ROOT=/opt/eosio RUN chmod +x /opt/eosio/bin/nodeosd.sh ENV LD_LIBRARY_PATH /usr/local/lib -VOLUME /opt/eosio/bin/data-dir ENV PATH /opt/eosio/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin diff --git a/Docker/nodeosd.sh b/Docker/nodeosd.sh index b808ccad460..870548d6b6b 100755 --- a/Docker/nodeosd.sh +++ b/Docker/nodeosd.sh @@ -1,6 +1,10 @@ #!/bin/sh cd /opt/eosio/bin +if [ ! 
-d "/opt/eosio/bin/data-dir" ]; then + mkdir /opt/eosio/bin/data-dir +fi + if [ -f '/opt/eosio/bin/data-dir/config.ini' ]; then echo else diff --git a/contracts/CMakeLists.txt b/contracts/CMakeLists.txt index 99a32aa53fa..c6eb0903d63 100644 --- a/contracts/CMakeLists.txt +++ b/contracts/CMakeLists.txt @@ -15,7 +15,6 @@ add_subdirectory(multi_index_test) add_subdirectory(eosio.system) add_subdirectory(identity) add_subdirectory(stltest) -add_subdirectory(exchange) add_subdirectory(test.inline) #add_subdirectory(bancor) diff --git a/contracts/eosio.system/exchange_state.cpp b/contracts/eosio.system/exchange_state.cpp index b621bdef902..621d3e714b3 100644 --- a/contracts/eosio.system/exchange_state.cpp +++ b/contracts/eosio.system/exchange_state.cpp @@ -1,4 +1,4 @@ -#include +#include namespace eosiosystem { asset exchange_state::convert_to_exchange( connector& c, asset in ) { diff --git a/contracts/eosiolib/compiler_builtins.h b/contracts/eosiolib/compiler_builtins.h index 62e2ff2515e..3e0d9435357 100644 --- a/contracts/eosiolib/compiler_builtins.h +++ b/contracts/eosiolib/compiler_builtins.h @@ -189,7 +189,7 @@ extern "C" { /** * Add two long doubles split as two 64 bit unsigned integers and assign the value to the first parameter. * @brief Add two long doubles (which are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. + * @param ret It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -201,7 +201,7 @@ extern "C" { /** * Subtract two long doubles split as two 64 bit unsigned integers and assign the value to the first parameter. * @brief Subtract two long doubles (which are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. + * @param ret It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -213,7 +213,7 @@ extern "C" { /** * Multiply two long doubles split as two 64 bit unsigned integers and assign the value to the first parameter. * @brief Multiply two long doubles (which are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. + * @param ret It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -225,7 +225,7 @@ extern "C" { /** * Divide two long doubles split as two 64 bit unsigned integers and assign the value to the first parameter. * @brief Divide two long doubles (which are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. + * @param ret It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -237,7 +237,6 @@ extern "C" { /** * Check equality between two doubles split as two 64 bit unsigned integers * @brief Check equality between two doubles (which are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. 
* @param lb Low 64 bits of the second 128 bit factor. @@ -252,7 +251,6 @@ extern "C" { /** * Check inequality between two doubles split as two 64 bit unsigned integers * @brief Check inequality between two doubles (which are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -268,7 +266,6 @@ extern "C" { /** * Check if the first double is greater or equal to the second double, the doubles are split as two 64 bit unsigned integers * @brief Check if the first double is greater or equal to the second double, (the doubles are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -283,7 +280,6 @@ extern "C" { /** * Check if the first double is greater than the second double, the doubles are split as two 64 bit unsigned integers * @brief Check if the first double is greater than the second double, (the doubles are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -298,7 +294,6 @@ extern "C" { /** * Check if the first double is less or equal to the second double, the doubles are split as two 64 bit unsigned integers * @brief Check if the first double is less or equal to the second double, (the doubles are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -313,7 +308,6 @@ extern "C" { /** * Check if the first double is less than the second double, the doubles are split as two 64 bit unsigned integers * @brief Check if the first double is less than the second double, (the doubles are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -328,7 +322,6 @@ extern "C" { /** * Compare two doubles which are split as two 64 bit unsigned integers * @brief Compare two doubles (the doubles are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -343,7 +336,6 @@ extern "C" { /** * Check if either of the doubles is NaN, the doubles are split as two 64 bit unsigned integers * @brief Check if either of the doubles is NaN, (the doubles are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. 
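For context, the builtins documented in the hunks above pass each 128-bit `long double` across the C boundary as two `uint64_t` halves. A minimal sketch of calling them, assuming a little-endian IEEE binary128 `long double` and the usual soft-float convention that comparison builtins such as `__eqtf2` return 0 on equality; the `split_f128` and `f128_equal` helpers are illustrative, not part of the header:

```cpp
#include <cstdint>
#include <cstring>

static_assert( sizeof(long double) == 16, "sketch assumes a 128-bit long double" );

// Declared in compiler_builtins.h with this shape; shown here so the sketch
// is self-contained.
extern "C" int __eqtf2( uint64_t la, uint64_t ha, uint64_t lb, uint64_t hb );

// Illustrative helper: split a 128-bit long double into the low/high 64-bit
// halves the builtins take (assumes little-endian IEEE binary128, the layout
// used when targeting WASM; not the x87 80-bit format).
static void split_f128( long double v, uint64_t& lo, uint64_t& hi ) {
   uint64_t halves[2];
   std::memcpy( halves, &v, sizeof(halves) );
   lo = halves[0];
   hi = halves[1];
}

bool f128_equal( long double a, long double b ) {
   uint64_t la, ha, lb, hb;
   split_f128( a, la, ha );
   split_f128( b, lb, hb );
   // Soft-float convention (an assumption here): comparison builtins return 0
   // when the operands compare equal, nonzero otherwise.
   return __eqtf2( la, ha, lb, hb ) == 0;
}
```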
diff --git a/contracts/eosiolib/fixedpoint.hpp b/contracts/eosiolib/fixedpoint.hpp index 9f2ced9fd2c..8c36f5ea54b 100644 --- a/contracts/eosiolib/fixedpoint.hpp +++ b/contracts/eosiolib/fixedpoint.hpp @@ -362,7 +362,7 @@ namespace eosio * Assignment operator. Assign fixed_point32 to fixed_point64 * * @brief Assignment operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return fixed_point64& - Reference to this object */ @@ -372,7 +372,7 @@ namespace eosio * Assignment operator. Assign fixed_point64 to fixed_point64 * * @brief Assignment operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return fixed_point64& - Reference to this object */ @@ -426,7 +426,7 @@ namespace eosio * Equality operator * * @brief Equality operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return true - if equal * @return false - otherwise @@ -437,7 +437,7 @@ namespace eosio * Greater than operator * * @brief Greater than operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return true - if equal * @return false - otherwise @@ -448,7 +448,7 @@ namespace eosio * Less than operator * * @brief Less than operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return true - if equal * @return false - otherwise @@ -506,7 +506,7 @@ namespace eosio * Construct a new fixed point32 object from int32_t * * @brief Construct a new fixed point32 object - * @param v - int32_t representation of the fixed point value + * @param param - int32_t representation of the fixed point value */ fixed_point32(int32_t param=0) : val(param) {} @@ -553,7 +553,7 @@ namespace eosio * Assignment operator. Assign fixed_point32 to fixed_point32 * * @brief Assignment operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return fixed_point32& - Reference to this object */ @@ -563,7 +563,7 @@ namespace eosio * Assignment operator. Assign fixed_point64 to fixed_point32 * * @brief Assignment operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return fixed_point32& - Reference to this object */ @@ -615,7 +615,7 @@ namespace eosio * Equality operator * * @brief Equality operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return true - if equal * @return false - otherwise @@ -626,7 +626,7 @@ namespace eosio * Greater than operator * * @brief Greater than operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return true - if equal * @return false - otherwise @@ -637,7 +637,7 @@ namespace eosio * Less than operator * * @brief Less than operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return true - if equal * @return false - otherwise diff --git a/contracts/eosiolib/transaction.h b/contracts/eosiolib/transaction.h index 99e1d3c2c46..dd7c05ded17 100644 --- a/contracts/eosiolib/transaction.h +++ b/contracts/eosiolib/transaction.h @@ -13,7 +13,7 @@ extern "C" { * * * Deferred transactions will not be processed until a future block. 
They - * can therefore have no effect on the success of failure of their parent + * can therefore have no effect on the success or failure of their parent * transaction so long as they appear well formed. If any other condition * causes the parent transaction to be marked as failing, then the deferred * transaction will never be processed. @@ -27,7 +27,7 @@ extern "C" { * ends such that the success or failure of the parent transaction is * dependent on the success of the message. If an inline message fails in * processing then the whole tree of transactions and actions rooted in the - * block will me marked as failing and none of effects on the database will + * block will be marked as failing and none of effects on the database will * persist. * * Inline actions and Deferred transactions must adhere to the permissions @@ -68,7 +68,7 @@ extern "C" { * @return 1 if transaction was canceled, 0 if transaction was not found * * Example: -* + * * @code * id = 0xffffffffffffffff * cancel_deferred( id ); diff --git a/contracts/exchange/CMakeLists.txt b/contracts/exchange/CMakeLists.txt deleted file mode 100644 index c855872ef5d..00000000000 --- a/contracts/exchange/CMakeLists.txt +++ /dev/null @@ -1,12 +0,0 @@ -file(GLOB ABI_FILES "*.abi") -add_wast_executable(TARGET exchange - INCLUDE_FOLDERS "${STANDARD_INCLUDE_FOLDERS}" - LIBRARIES libc++ libc eosiolib - DESTINATION_FOLDER ${CMAKE_CURRENT_BINARY_DIR} -) -configure_file("${ABI_FILES}" "${CMAKE_CURRENT_BINARY_DIR}" COPYONLY) - -add_executable(test_exchange test_exchange.cpp ) -#bfp/lib/pack.c bfp/lib/posit.cpp bfp/lib/util.c bfp/lib/op2.c) -target_link_libraries( test_exchange fc ) -target_include_directories( test_exchange PUBLIC fixed_point/include ) diff --git a/contracts/exchange/Pegged Derivative Currency Design.md b/contracts/exchange/Pegged Derivative Currency Design.md deleted file mode 100644 index fc819ccd61f..00000000000 --- a/contracts/exchange/Pegged Derivative Currency Design.md +++ /dev/null @@ -1,120 +0,0 @@ -# Pegged Derivative Currency Design - -A currency is designed to be a fungible and non-callable asset. A pegged Derivative currency, such as BitUSD, is backed by a cryptocurrency held as collateral. The "issuer" is "short" the dollar and extra-long the cryptocurrency. The buyer is simply long the dollar. - - - -Background ----------- -BitShares created the first working pegged asset system by allowing anyone to take out a short position by posting collateral and issuing BitUSD at a minimum 1.5:1 collateral:debt ratio. The **least collateralized position** was forced to provide liquidity for BitUSD holders -any time the market price fell more than a couple percent below the dollar (if the BitUSD holder opted to use forced liquidation). - -To prevent abuse of the price feed, all forced liquidation was delayed. - -In the event of a "black swan" all shorts have their positions liquidated at the price feed and all holders of BitUSD are only promised a fixed redemption rate. - -There are several problems with this design: - -1. There is very **poor liquidity** in the BitUSD / BitShares market creating **large spreads** -2. The shorts take all the risk and only profit when the price of BitShares rises -3. Blackswans are perpetual and very disruptive. -4. It is "every short for themselves" -5. Due to the risk/reward ratio the supply can be limited -6. The **collateral requirements** limit opportunity for leverage. 
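The 1.5:1 collateral:debt ratio that this design document keeps returning to is plain arithmetic; a toy sketch with invented numbers (not taken from any contract):

```cpp
#include <iostream>

int main() {
   // Invented example: 1500 BTS of collateral backing 10 BitUSD of debt,
   // with a feed price of 0.01 USD per BTS.
   double collateral_bts   = 1500.0;
   double debt_usd         = 10.0;
   double feed_usd_per_bts = 0.01;

   // Collateral ratio = value of collateral at the feed / value of debt.
   double ratio = ( collateral_bts * feed_usd_per_bts ) / debt_usd; // 1.5

   // The document's minimum is 1.5:1; below that, the least collateralized
   // position is forced to provide liquidity (a margin call).
   std::cout << "ratio = " << ratio
             << ( ratio < 1.5 ? " -> margin call" : " -> at/above minimum" )
             << "\n";
}
```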
- -New Approach ------------- -We present a new approach to pegged assets where the short-positions cooperate to provide the -service of a pegged asset with **high liquidity**. They make money by encouraging people to trade -their pegged asset and earning income **from the trading fees rather than seeking heavy leverage** -in a speculative market. They also generate money by earning interest on personal short positions. - -The Setup Process ------------------ -An initial user deposits a Collateral Currency (C) into an smart contract and provides the initial -price feed. A new Debt token (D) is issued based upon the price feed and a 1.5:1 C:D ratio and the -issued tokens are deposited into the **Bancor market maker**. At this point in time there is 0 leverage by -the market maker because no D have been sold. The initial user is also issued exchange tokens (E) in the -market maker. - -At this point people can buy E or D and the Bancor algorithm will provide liquidity between C, E, and D. Due to -the fees charged by the the market maker the value of E will increase in terms of C. - -> Collateral currency = Smart Token/reserve of parent currency -> -> Issued tokens = Bounty Tokens (distributed to early holders / community supporters) -> -> Collateral Ratio (C:D) = reciprocal of Loan-to-Value Ratio (LTV) - -Maintaining the Peg -------------------- -To maximize the utility of the D token, the market maker needs to maintain a **narrow trading range** of D vs the Dollar. -The more **consistant and reliable this trading range** is, the more people (arbitrageur) will be willing to hold and trade D. There are several -situations that can occur: - -1. D is trading above a dollar +5% - - a. Maker is fully collateralized `C:D>1.5` - - - issue new D and deposit into maker such that collateral ratio is 1.5:1 - b. Maker is not fully collateralized `C:D<1.5` - - - adjust the maker weights to lower the redemption prices (defending capital of maker), arbitrageur will probably prevent this reality. - - > Marker Weights = Connector Weights (in Bancor) - > - > Redemption Price: The price at which a bond may be repurchased by the issuer before maturity - -2. D is selling for less than a dollar -5% - - a. Maker is fully collateralized `C:D>1.5` - - - adjust the maker weights to increase redemption prices - b. Maker is under collateralized `C:D<1.5` - ``` - - stop E -> C and E -> D trades. - - offer bonus on C->E and D->E trades. - - on D->E conversions take received D out of circulation rather than add to the market maker - - on C<->D conversion continue as normal - - stop attempting adjusting maker ratio to defend the price feed and let the price rise until above +1% - ``` - -Value of E = C - D where D == all in circulation, so E->C conversions should always assume all outstanding D was **settled at current maker price**. The result of such a conversion will **raise the collateral ratio**, unless they are forced to buy and retire some D at the current ratio. The algorithm must ensure the individual selling E doesn't leave those holding E worse-off from a D/E perspective (doesnot reduce D to a large extent). An individual buying E will create new D to maintain the same D/E ratio. - -This implies that when value of all outstanding D is greater than all C that E cannot be sold until the network -generates **enough in trading fees** to recaptialize the market. This is like a company with more debt than equity not allowing buybacks. In fact, **E should not be sellable any time the collateral ratio falls below 1.5:1**. 
- -BitShares is typical **margin call** territory, but holders of E have a chance at future liquidity if the situation improves. While E is not sellable, -E can be purchased at a 10% discount to its theoretical value, this will dilute existing holders of E but will raise capital and hopefully move E holders closer to eventual liquidity. - - -Adjusting Bancor Ratios by Price Feed -------------------------------------- -The price feed informs the algorithm of significant deviations between the Bancor effective price and the target peg. The price feed is necessarily a lagging indicator and may also factor in natural spreads between different exchanges. Therefore, the price feed shall have no impact unless there is a significant deviation (5%). When such a deviation occurs, the ratio is automatically adjusted to 4%. - -In other words, the price feed keeps the maker in the "channel" but does not attempt to set the real-time prices. If there is a sudden change and the price feed differs from maker by 50% then after the adjustment it will still differ by 4%. - -> Effective Price = Connected Tokens exchanged / Smart Tokens exchanged - -Summary -------- -Under this model holders of E are short the dollar and make money to recollateralize their positions via market activity. -Anyone selling E must **realize the losses as a result of being short**. -Anyone buying E can get in to take their place at the current collateral ratio. - -The value of E is equal to the value of a **margin postion**. -Anyone can buy E for a combination C and D equal to the current collateral ratio. - -Anyone may sell E for a personal margin position with equal ratio of C and D. -Anyone may buy E with a personal margin position. - -If they only have C, then they must use some of C to buy D first (which will move the price). -If they only have D, then they must use some of D to buy C first (which will also move the price). - -Anyone can buy and sell E based upon Bancor balances of C and (all D), they must sell their E for a combination of D and C at current ratio, then sell the C or D for the other. - - -Anytime collateral level falls below 1.5 selling E is blocked and buying of E is given a 10% bonus. -Anyone can convert D<->C using Bancor maker configured to maintain price within +/- 5% of the price feed. 
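The feed rule described above (no impact unless the deviation exceeds 5%, then adjusted back to 4%) amounts to a simple band clamp; a self-contained sketch, with the function name and constants chosen for illustration:

```cpp
#include <cmath>
#include <iostream>

// Band rule from the text above: the price feed has no effect while the
// maker's effective price is within 5% of it; beyond that, the maker is
// pulled back to a 4% deviation rather than snapped onto the feed.
double adjust_toward_feed( double maker_price, double feed_price ) {
   double deviation = ( maker_price - feed_price ) / feed_price;
   if( std::fabs( deviation ) <= 0.05 )
      return maker_price;                        // inside the channel: unchanged
   double sign = deviation > 0 ? 1.0 : -1.0;
   return feed_price * ( 1.0 + sign * 0.04 );    // clamp to +/-4% of the feed
}

int main() {
   // A sudden 50% gap is reduced to the 4% edge of the band, matching the
   // document's example; a 3% gap is left alone.
   std::cout << adjust_toward_feed( 1.50, 1.00 ) << "\n"; // 1.04
   std::cout << adjust_toward_feed( 1.03, 1.00 ) << "\n"; // 1.03
}
```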
- - diff --git a/contracts/exchange/exchange.abi b/contracts/exchange/exchange.abi deleted file mode 100644 index b4cde189371..00000000000 --- a/contracts/exchange/exchange.abi +++ /dev/null @@ -1,166 +0,0 @@ -{ - "version": "eosio::abi/1.0", - "types": [{ - "new_type_name": "account_name", - "type": "name" - } - ], - "structs": [ - { - "name": "extended_symbol", - "base": "", - "fields": [ - {"name":"sym", "type":"symbol"}, - {"name":"contract", "type":"account_name"} - ] - }, - { - "name": "extended_asset", - "base": "", - "fields": [ - {"name":"quantity", "type":"asset"}, - {"name":"contract", "type":"account_name"} - ] - }, - { - "name": "upmargin", - "base": "", - "fields": [ - {"name":"borrower", "type":"account_name"}, - {"name":"market", "type":"symbol"}, - {"name":"delta_borrow", "type":"extended_asset"}, - {"name":"delta_collateral", "type":"extended_asset"} - ] - }, - { - "name": "covermargin", - "base": "", - "fields": [ - {"name":"borrower", "type":"account_name"}, - {"name":"market", "type":"symbol"}, - {"name":"cover_amount", "type":"extended_asset"} - ] - }, - { - "name": "lend", - "base": "", - "fields": [ - {"name":"lender", "type":"account_name"}, - {"name":"market", "type":"symbol"}, - {"name":"quantity", "type":"extended_asset"} - ] - }, - { - "name": "unlend", - "base": "", - "fields": [ - {"name":"lender", "type":"account_name"}, - {"name":"market", "type":"symbol"}, - {"name":"interest_shares", "type":"float64"}, - {"name":"interest_symbol", "type":"extended_symbol"} - ] - }, - { - "name": "trade", - "base": "", - "fields": [ - {"name":"seller", "type":"account_name"}, - {"name":"market", "type":"symbol"}, - {"name":"sell", "type":"extended_asset"}, - {"name":"min_receive", "type":"extended_asset"}, - {"name":"expire", "type":"uint32"}, - {"name":"fill_or_kill", "type":"uint8"} - ] - }, - { - "name": "createx", - "base": "", - "fields": [ - {"name":"creator", "type":"account_name"}, - {"name":"initial_supply", "type":"asset"}, - {"name":"fee", "type":"uint32"}, - {"name":"base_deposit", "type":"extended_asset"}, - {"name":"quote_deposit", "type":"extended_asset"} - ] - }, - { - "name": "transfer", - "base": "", - "fields": [ - {"name":"from", "type":"account_name"}, - {"name":"to", "type":"account_name"}, - {"name":"quantity", "type":"asset"}, - {"name":"memo", "type":"string"} - ] - }, - { - "name": "deposit", - "base": "", - "fields": [ - {"name":"from", "type":"account_name"}, - {"name":"quantity", "type":"extended_asset"} - ] - }, - { - "name": "create", - "base": "", - "fields": [ - {"name":"issuer", "type":"account_name"}, - {"name":"maximum_supply", "type":"asset"}, - {"name":"can_freeze", "type":"uint8"}, - {"name":"can_recall", "type":"uint8"}, - {"name":"can_whitelist", "type":"uint8"} - ] - },{ - "name": "issue", - "base": "", - "fields": [ - {"name":"to", "type":"account_name"}, - {"name":"quantity", "type":"asset"}, - {"name":"memo", "type":"string"} - ] - },{ - "name": "account", - "base": "", - "fields": [ - {"name":"currency", "type":"uint64"}, - {"name":"balance", "type":"uint64"} - ] - },{ - "name": "currency_stats", - "base": "", - "fields": [ - {"name":"currency", "type":"uint64"}, - {"name":"supply", "type":"uint64"} - ] - } - ], - "actions": [ - { "name": "deposit", "type": "deposit", "ricardian_contract": "" }, - { "name": "transfer", "type": "transfer", "ricardian_contract": "" }, - { "name": "trade", "type": "trade", "ricardian_contract": "" }, - { "name": "createx", "type": "createx", "ricardian_contract": "" }, - { "name": "issue", "type": 
"issue", "ricardian_contract": "" }, - { "name": "lend", "type": "lend", "ricardian_contract": "" }, - { "name": "unlend", "type": "unlend", "ricardian_contract": "" }, - { "name": "upmargin", "type": "upmargin", "ricardian_contract": "" }, - { "name": "covermargin", "type": "covermargin", "ricardian_contract": "" }, - { "name": "create", "type": "create", "ricardian_contract": "" } - ], - "tables": [{ - "name": "account", - "type": "account", - "index_type": "i64", - "key_names" : ["currency"], - "key_types" : ["uint64"] - },{ - "name": "stat", - "type": "currency_stats", - "index_type": "i64", - "key_names" : ["currency"], - "key_types" : ["uint64"] - } - ], - "ricardian_clauses": [], - "abi_extensions": [] -} diff --git a/contracts/exchange/exchange.cpp b/contracts/exchange/exchange.cpp deleted file mode 100644 index f90076cf3e1..00000000000 --- a/contracts/exchange/exchange.cpp +++ /dev/null @@ -1,250 +0,0 @@ -#include -#include "exchange.hpp" - -#include "exchange_state.cpp" -#include "exchange_accounts.cpp" -#include "market_state.cpp" - -#include - -namespace eosio { - - void exchange::deposit( account_name from, extended_asset quantity ) { - eosio_assert( quantity.is_valid(), "invalid quantity" ); - currency::inline_transfer( from, _this_contract, quantity, "deposit" ); - _accounts.adjust_balance( from, quantity, "deposit" ); - } - - void exchange::withdraw( account_name from, extended_asset quantity ) { - require_auth( from ); - eosio_assert( quantity.is_valid(), "invalid quantity" ); - eosio_assert( quantity.amount >= 0, "cannot withdraw negative balance" ); // Redundant? inline_transfer will fail if quantity is not positive. - _accounts.adjust_balance( from, -quantity ); - currency::inline_transfer( _this_contract, from, quantity, "withdraw" ); - } - - void exchange::on( const trade& t ) { - require_auth( t.seller ); - eosio_assert( t.sell.is_valid(), "invalid sell amount" ); - eosio_assert( t.sell.amount > 0, "sell amount must be positive" ); - eosio_assert( t.min_receive.is_valid(), "invalid min receive amount" ); - eosio_assert( t.min_receive.amount >= 0, "min receive amount cannot be negative" ); - - auto receive_symbol = t.min_receive.get_extended_symbol(); - eosio_assert( t.sell.get_extended_symbol() != receive_symbol, "invalid conversion" ); - - market_state market( _this_contract, t.market, _accounts ); - - auto temp = market.exstate; - auto output = temp.convert( t.sell, receive_symbol ); - - while( temp.requires_margin_call() ) { - market.margin_call( receive_symbol ); - temp = market.exstate; - output = temp.convert( t.sell, receive_symbol ); - } - market.exstate = temp; - - print( name{t.seller}, " ", t.sell, " => ", output, "\n" ); - - if( t.min_receive.amount != 0 ) { - eosio_assert( t.min_receive.amount <= output.amount, "unable to fill" ); - } - - _accounts.adjust_balance( t.seller, -t.sell, "sold" ); - _accounts.adjust_balance( t.seller, output, "received" ); - - if( market.exstate.supply.amount != market.initial_state().supply.amount ) { - auto delta = market.exstate.supply - market.initial_state().supply; - - _excurrencies.issue_currency( { .to = _this_contract, - .quantity = delta, - .memo = string("") } ); - } - - /// TODO: if pending order start deferred trx to fill it - - market.save(); - } - - - /** - * This action shall fail if it would result in a margin call - */ - void exchange::on( const upmargin& b ) { - require_auth( b.borrower ); - eosio_assert( b.delta_borrow.is_valid(), "invalid borrow delta" ); - eosio_assert( b.delta_collateral.is_valid(), 
"invalid collateral delta" ); - - market_state market( _this_contract, b.market, _accounts ); - - eosio_assert( b.delta_borrow.amount != 0 || b.delta_collateral.amount != 0, "no effect" ); - eosio_assert( b.delta_borrow.get_extended_symbol() != b.delta_collateral.get_extended_symbol(), "invalid args" ); - eosio_assert( market.exstate.base.balance.get_extended_symbol() == b.delta_borrow.get_extended_symbol() || - market.exstate.quote.balance.get_extended_symbol() == b.delta_borrow.get_extended_symbol(), - "invalid asset for market" ); - eosio_assert( market.exstate.base.balance.get_extended_symbol() == b.delta_collateral.get_extended_symbol() || - market.exstate.quote.balance.get_extended_symbol() == b.delta_collateral.get_extended_symbol(), - "invalid asset for market" ); - - market.update_margin( b.borrower, b.delta_borrow, b.delta_collateral ); - - /// if this succeeds then the borrower will see their balances adjusted accordingly, - /// if they don't have sufficient balance to either fund the collateral or pay off the - /// debt then this will fail before we go further. - _accounts.adjust_balance( b.borrower, b.delta_borrow, "borrowed" ); - _accounts.adjust_balance( b.borrower, -b.delta_collateral, "collateral" ); - - market.save(); - } - - void exchange::on( const covermargin& c ) { - require_auth( c.borrower ); - eosio_assert( c.cover_amount.is_valid(), "invalid cover amount" ); - eosio_assert( c.cover_amount.amount > 0, "cover amount must be positive" ); - - market_state market( _this_contract, c.market, _accounts ); - - market.cover_margin( c.borrower, c.cover_amount); - - market.save(); - } - - void exchange::createx( account_name creator, - asset initial_supply, - uint32_t /* fee */, - extended_asset base_deposit, - extended_asset quote_deposit - ) { - require_auth( creator ); - eosio_assert( initial_supply.is_valid(), "invalid initial supply" ); - eosio_assert( initial_supply.amount > 0, "initial supply must be positive" ); - eosio_assert( base_deposit.is_valid(), "invalid base deposit" ); - eosio_assert( base_deposit.amount > 0, "base deposit must be positive" ); - eosio_assert( quote_deposit.is_valid(), "invalid quote deposit" ); - eosio_assert( quote_deposit.amount > 0, "quote deposit must be positive" ); - eosio_assert( base_deposit.get_extended_symbol() != quote_deposit.get_extended_symbol(), - "must exchange between two different currencies" ); - - print( "base: ", base_deposit.get_extended_symbol() ); - print( "quote: ",quote_deposit.get_extended_symbol() ); - - auto exchange_symbol = initial_supply.symbol.name(); - print( "marketid: ", exchange_symbol, " \n " ); - - markets exstates( _this_contract, exchange_symbol ); - auto existing = exstates.find( exchange_symbol ); - - eosio_assert( existing == exstates.end(), "market already exists" ); - exstates.emplace( creator, [&]( auto& s ) { - s.manager = creator; - s.supply = extended_asset(initial_supply, _this_contract); - s.base.balance = base_deposit; - s.quote.balance = quote_deposit; - - s.base.peer_margin.total_lent.symbol = base_deposit.symbol; - s.base.peer_margin.total_lent.contract = base_deposit.contract; - s.base.peer_margin.total_lendable.symbol = base_deposit.symbol; - s.base.peer_margin.total_lendable.contract = base_deposit.contract; - - s.quote.peer_margin.total_lent.symbol = quote_deposit.symbol; - s.quote.peer_margin.total_lent.contract = quote_deposit.contract; - s.quote.peer_margin.total_lendable.symbol = quote_deposit.symbol; - s.quote.peer_margin.total_lendable.contract = quote_deposit.contract; - }); - 
- _excurrencies.create_currency( { .issuer = _this_contract, - // TODO: After currency contract respects maximum supply limits, the maximum supply here needs to be set appropriately. - .maximum_supply = asset( 0, initial_supply.symbol ), - .issuer_can_freeze = false, - .issuer_can_whitelist = false, - .issuer_can_recall = false } ); - - _excurrencies.issue_currency( { .to = _this_contract, - .quantity = initial_supply, - .memo = string("initial exchange tokens") } ); - - _accounts.adjust_balance( creator, extended_asset( initial_supply, _this_contract ), "new exchange issue" ); - _accounts.adjust_balance( creator, -base_deposit, "new exchange deposit" ); - _accounts.adjust_balance( creator, -quote_deposit, "new exchange deposit" ); - } - - void exchange::lend( account_name lender, symbol_type market, extended_asset quantity ) { - require_auth( lender ); - eosio_assert( quantity.is_valid(), "invalid quantity" ); - eosio_assert( quantity.amount > 0, "must lend a positive amount" ); - - market_state m( _this_contract, market, _accounts ); - m.lend( lender, quantity ); - m.save(); - } - - void exchange::unlend( account_name lender, symbol_type market, double interest_shares, extended_symbol interest_symbol ) { - require_auth( lender ); - eosio_assert( interest_shares > 0, "must unlend a positive amount" ); - - market_state m( _this_contract, market, _accounts ); - m.unlend( lender, interest_shares, interest_symbol ); - m.save(); - } - - - void exchange::on( const currency::transfer& t, account_name code ) { - if( code == _this_contract ) - _excurrencies.on( t ); - - if( t.to == _this_contract ) { - auto a = extended_asset(t.quantity, code); - eosio_assert( a.is_valid(), "invalid quantity in transfer" ); - eosio_assert( a.amount != 0, "zero quantity is disallowed in transfer"); - eosio_assert( a.amount > 0 || t.memo == "withdraw", "withdrew tokens without withdraw in memo"); - eosio_assert( a.amount < 0 || t.memo == "deposit", "received tokens without deposit in memo" ); - _accounts.adjust_balance( t.from, a, t.memo ); - } - } - - - #define N(X) ::eosio::string_to_name(#X) - - void exchange::apply( account_name contract, account_name act ) { - - if( act == N(transfer) ) { - on( unpack_action_data(), contract ); - return; - } - - if( contract != _this_contract ) - return; - - auto& thiscontract = *this; - switch( act ) { - EOSIO_API( exchange, (createx)(deposit)(withdraw)(lend)(unlend) ) - }; - - switch( act ) { - case N(trade): - on( unpack_action_data() ); - return; - case N(upmargin): - on( unpack_action_data() ); - return; - case N(covermargin): - on( unpack_action_data() ); - return; - default: - _excurrencies.apply( contract, act ); - return; - } - } - -} /// namespace eosio - - - -extern "C" { - [[noreturn]] void apply( uint64_t receiver, uint64_t code, uint64_t action ) { - eosio::exchange ex( receiver ); - ex.apply( code, action ); - eosio_exit(0); - } -} diff --git a/contracts/exchange/exchange.hpp b/contracts/exchange/exchange.hpp deleted file mode 100644 index 9ee3139e0b0..00000000000 --- a/contracts/exchange/exchange.hpp +++ /dev/null @@ -1,87 +0,0 @@ -#include -#include -#include -#include -#include - -namespace eosio { - - /** - * This contract enables users to create an exchange between any pair of - * standard currency types. A new exchange is created by funding it with - * an equal value of both sides of the order book and giving the issuer - * the initial shares in that orderbook. 
- * - * To prevent excessive rounding errors, the initial deposit should include - * a sizeable quantity of both the base and quote currencies and the exchange - * shares should have a quantity 100x the quantity of the largest initial - * deposit. - * - * Users must deposit funds into the exchange before they can trade on the - * exchange. - * - * Each time an exchange is created a new currency for that exchanges market - * maker is also created. This currencies supply and symbol must be unique and - * it uses the currency contract's tables to manage it. - */ - class exchange { - private: - account_name _this_contract; - currency _excurrencies; - exchange_accounts _accounts; - - public: - exchange( account_name self ) - :_this_contract(self), - _excurrencies(self), - _accounts(self) - {} - - void createx( account_name creator, - asset initial_supply, - uint32_t fee, - extended_asset base_deposit, - extended_asset quote_deposit - ); - - void deposit( account_name from, extended_asset quantity ); - void withdraw( account_name from, extended_asset quantity ); - void lend( account_name lender, symbol_type market, extended_asset quantity ); - - void unlend( - account_name lender, - symbol_type market, - double interest_shares, - extended_symbol interest_symbol - ); - - struct covermargin { - account_name borrower; - symbol_type market; - extended_asset cover_amount; - }; - - struct upmargin { - account_name borrower; - symbol_type market; - extended_asset delta_borrow; - extended_asset delta_collateral; - }; - - struct trade { - account_name seller; - symbol_type market; - extended_asset sell; - extended_asset min_receive; - uint32_t expire = 0; - uint8_t fill_or_kill = true; - }; - - void on( const trade& t ); - void on( const upmargin& b ); - void on( const covermargin& b ); - void on( const currency::transfer& t, account_name code ); - - void apply( account_name contract, account_name act ); - }; -} // namespace eosio diff --git a/contracts/exchange/exchange_accounts.cpp b/contracts/exchange/exchange_accounts.cpp deleted file mode 100644 index 249b56c3e66..00000000000 --- a/contracts/exchange/exchange_accounts.cpp +++ /dev/null @@ -1,27 +0,0 @@ -#include - -namespace eosio { - - void exchange_accounts::adjust_balance( account_name owner, extended_asset delta, const string& reason ) { - (void)reason; - - auto table = exaccounts_cache.find( owner ); - if( table == exaccounts_cache.end() ) { - table = exaccounts_cache.emplace( owner, exaccounts(_this_contract, owner ) ).first; - } - auto useraccounts = table->second.find( owner ); - if( useraccounts == table->second.end() ) { - table->second.emplace( owner, [&]( auto& exa ){ - exa.owner = owner; - exa.balances[delta.get_extended_symbol()] = delta.amount; - eosio_assert( delta.amount >= 0, "overdrawn balance 1" ); - }); - } else { - table->second.modify( useraccounts, 0, [&]( auto& exa ) { - const auto& b = exa.balances[delta.get_extended_symbol()] += delta.amount; - eosio_assert( b >= 0, "overdrawn balance 2" ); - }); - } - } - -} /// namespace eosio diff --git a/contracts/exchange/exchange_accounts.hpp b/contracts/exchange/exchange_accounts.hpp deleted file mode 100644 index 2ec6027c5ec..00000000000 --- a/contracts/exchange/exchange_accounts.hpp +++ /dev/null @@ -1,43 +0,0 @@ -#pragma once -#include -#include - -namespace eosio { - - using boost::container::flat_map; - - /** - * Each user has their own account with the exchange contract that keeps track - * of how much a user has on deposit for each extended asset type. 
The assumption - * is that storing a single flat map of all balances for a particular user will - * be more practical than breaking this down into a multi-index table sorted by - * the extended_symbol. - */ - struct exaccount { - account_name owner; - flat_map balances; - - uint64_t primary_key() const { return owner; } - EOSLIB_SERIALIZE( exaccount, (owner)(balances) ) - }; - - typedef eosio::multi_index exaccounts; - - - /** - * Provides an abstracted interface around storing balances for users. This class - * caches tables to make multiple accesses effecient. - */ - struct exchange_accounts { - exchange_accounts( account_name code ):_this_contract(code){} - - void adjust_balance( account_name owner, extended_asset delta, const string& reason = string() ); - - private: - account_name _this_contract; - /** - * Keep a cache of all accounts tables we access - */ - flat_map exaccounts_cache; - }; -} /// namespace eosio diff --git a/contracts/exchange/exchange_state.cpp b/contracts/exchange/exchange_state.cpp deleted file mode 100644 index 7f40fae9641..00000000000 --- a/contracts/exchange/exchange_state.cpp +++ /dev/null @@ -1,87 +0,0 @@ -#include - -namespace eosio { - extended_asset exchange_state::convert_to_exchange( connector& c, extended_asset in ) { - - real_type R(supply.amount); - real_type C(c.balance.amount+in.amount); - real_type F(c.weight/1000.0); - real_type T(in.amount); - real_type ONE(1.0); - - real_type E = -R * (ONE - std::pow( ONE + T / C, F) ); - int64_t issued = int64_t(E); - - supply.amount += issued; - c.balance.amount += in.amount; - - return extended_asset( issued, supply.get_extended_symbol() ); - } - - extended_asset exchange_state::convert_from_exchange( connector& c, extended_asset in ) { - eosio_assert( in.contract == supply.contract, "unexpected asset contract input" ); - eosio_assert( in.symbol== supply.symbol, "unexpected asset symbol input" ); - - real_type R(supply.amount - in.amount); - real_type C(c.balance.amount); - real_type F(1000.0/c.weight); - real_type E(in.amount); - real_type ONE(1.0); - - - real_type T = C * (std::pow( ONE + E/R, F) - ONE); - int64_t out = int64_t(T); - - supply.amount -= in.amount; - c.balance.amount -= out; - - return extended_asset( out, c.balance.get_extended_symbol() ); - } - - extended_asset exchange_state::convert( extended_asset from, extended_symbol to ) { - auto sell_symbol = from.get_extended_symbol(); - auto ex_symbol = supply.get_extended_symbol(); - auto base_symbol = base.balance.get_extended_symbol(); - auto quote_symbol = quote.balance.get_extended_symbol(); - - if( sell_symbol != ex_symbol ) { - if( sell_symbol == base_symbol ) { - from = convert_to_exchange( base, from ); - } else if( sell_symbol == quote_symbol ) { - from = convert_to_exchange( quote, from ); - } else { - eosio_assert( false, "invalid sell" ); - } - } else { - if( to == base_symbol ) { - from = convert_from_exchange( base, from ); - } else if( to == quote_symbol ) { - from = convert_from_exchange( quote, from ); - } else { - eosio_assert( false, "invalid conversion" ); - } - } - - if( to != from.get_extended_symbol() ) - return convert( from, to ); - - return from; - } - - bool exchange_state::requires_margin_call( const exchange_state::connector& con )const { - if( con.peer_margin.total_lent.amount > 0 ) { - auto tmp = *this; - auto base_total_col = int64_t(con.peer_margin.total_lent.amount * con.peer_margin.least_collateralized); - auto covered = tmp.convert( extended_asset( base_total_col, con.balance.get_extended_symbol()), 
con.peer_margin.total_lent.get_extended_symbol() ); - if( covered.amount <= con.peer_margin.total_lent.amount ) - return true; - } - return false; - } - - bool exchange_state::requires_margin_call()const { - return requires_margin_call( base ) || requires_margin_call( quote ); - } - - -} /// namespace eosio diff --git a/contracts/exchange/exchange_state.hpp b/contracts/exchange/exchange_state.hpp deleted file mode 100644 index 1d102578062..00000000000 --- a/contracts/exchange/exchange_state.hpp +++ /dev/null @@ -1,87 +0,0 @@ -#pragma once - -#include - -namespace eosio { - - typedef double real_type; - - struct margin_state { - extended_asset total_lendable; - extended_asset total_lent; - real_type least_collateralized = std::numeric_limits::max(); - - /** - * Total shares allocated to those who have lent, when someone unlends they get - * total_lendable * user_interest_shares / interest_shares and total_lendable is reduced. - * - * When interest is paid, it shows up in total_lendable - */ - real_type interest_shares = 0; - - real_type lend( int64_t new_lendable ) { - if( total_lendable.amount > 0 ) { - real_type new_shares = (interest_shares * new_lendable) / total_lendable.amount; - interest_shares += new_shares; - total_lendable.amount += new_lendable; - } else { - interest_shares += new_lendable; - total_lendable.amount += new_lendable; - } - return new_lendable; - } - - extended_asset unlend( double ishares ) { - extended_asset result = total_lent; - print( "unlend: ", ishares, " existing interest_shares: ", interest_shares, "\n" ); - result.amount = int64_t( (ishares * total_lendable.amount) / interest_shares ); - - total_lendable.amount -= result.amount; - interest_shares -= ishares; - - eosio_assert( interest_shares >= 0, "underflow" ); - eosio_assert( total_lendable.amount >= 0, "underflow" ); - - return result; - } - - EOSLIB_SERIALIZE( margin_state, (total_lendable)(total_lent)(least_collateralized)(interest_shares) ) - }; - - /** - * Uses Bancor math to create a 50/50 relay between two asset types. The state of the - * bancor exchange is entirely contained within this struct. There are no external - * side effects associated with using this API. 
-    */
-   struct exchange_state {
-      account_name      manager;
-      extended_asset    supply;
-      uint32_t          fee = 0;
-
-      struct connector {
-         extended_asset balance;
-         uint32_t       weight = 500;
-
-         margin_state   peer_margin; /// peer_connector collateral lending balance
-
-         EOSLIB_SERIALIZE( connector, (balance)(weight)(peer_margin) )
-      };
-
-      connector base;
-      connector quote;
-
-      uint64_t primary_key()const { return supply.symbol.name(); }
-
-      extended_asset convert_to_exchange( connector& c, extended_asset in );
-      extended_asset convert_from_exchange( connector& c, extended_asset in );
-      extended_asset convert( extended_asset from, extended_symbol to );
-
-      bool requires_margin_call( const exchange_state::connector& con )const;
-      bool requires_margin_call()const;
-
-      EOSLIB_SERIALIZE( exchange_state, (manager)(supply)(fee)(base)(quote) )
-   };
-
-   typedef eosio::multi_index<N(markets), exchange_state> markets;
-
-} /// namespace eosio
diff --git a/contracts/exchange/market_state.cpp b/contracts/exchange/market_state.cpp
deleted file mode 100644
index 78fa7ba9f87..00000000000
--- a/contracts/exchange/market_state.cpp
+++ /dev/null
@@ -1,223 +0,0 @@
-#include
-#include
-
-namespace eosio {
-
-   market_state::market_state( account_name this_contract, symbol_type market_symbol, exchange_accounts& acnts )
-   :marketid( market_symbol.name() ),
-    market_table( this_contract, marketid ),
-    base_margins( this_contract,  (marketid<<4) + 1),
-    quote_margins( this_contract, (marketid<<4) + 2),
-    base_loans( this_contract,    (marketid<<4) + 1),
-    quote_loans( this_contract,   (marketid<<4) + 2),
-    _accounts(acnts),
-    market_state_itr( market_table.find(marketid) )
-   {
-      eosio_assert( market_state_itr != market_table.end(), "unknown market" );
-      exstate = *market_state_itr;
-   }
-
-   void market_state::margin_call( extended_symbol debt_type ) {
-      if( debt_type == exstate.base.balance.get_extended_symbol() )
-         margin_call( exstate.base, base_margins );
-      else
-         margin_call( exstate.quote, quote_margins );
-   }
-
-   void market_state::margin_call( exchange_state::connector& c, margins& marginstable ) {
-      auto price_idx = marginstable.get_index<N(callprice)>();
-      auto pos = price_idx.begin();
-      if( pos == price_idx.end() )
-         return;
-
-      auto receipt = exstate.convert( pos->collateral, pos->borrowed.get_extended_symbol() );
-      eosio_assert( receipt.amount >= pos->borrowed.amount, "programmer error: insufficient collateral to cover" );/// VERY BAD, SHOULD NOT HAPPEN
-      auto change_debt = receipt - pos->borrowed;
-
-      auto change_collat = exstate.convert( change_debt, pos->collateral.get_extended_symbol() );
-
-      _accounts.adjust_balance( pos->owner, change_collat );
-
-      c.peer_margin.total_lent.amount -= pos->borrowed.amount;
-      price_idx.erase(pos);
-
-      pos = price_idx.begin();
-      if( pos != price_idx.end() )
-         c.peer_margin.least_collateralized = pos->call_price;
-      else
-         c.peer_margin.least_collateralized = double(uint64_t(-1));
-   }
-
-
-   const exchange_state& market_state::initial_state()const {
-      return *market_state_itr;
-   }
-
-   void market_state::lend( account_name lender, const extended_asset& quantity ) {
-      auto sym = quantity.get_extended_symbol();
-      _accounts.adjust_balance( lender, -quantity );
-
-      if( sym == exstate.base.balance.get_extended_symbol() ) {
-         double new_shares = exstate.base.peer_margin.lend( quantity.amount );
-         adjust_lend_shares( lender, base_loans, new_shares );
-      }
-      else if( sym == exstate.quote.balance.get_extended_symbol() ) {
-         double new_shares = exstate.quote.peer_margin.lend( quantity.amount );
-         adjust_lend_shares( lender, quote_loans,
new_shares ); - } - else eosio_assert( false, "unable to lend to this market" ); - } - - void market_state::unlend( account_name lender, double ishares, const extended_symbol& sym ) { - eosio_assert( ishares > 0, "cannot unlend negative balance" ); - adjust_lend_shares( lender, base_loans, -ishares ); - - print( "sym: ", sym ); - - if( sym == exstate.base.balance.get_extended_symbol() ) { - extended_asset unlent = exstate.base.peer_margin.unlend( ishares ); - _accounts.adjust_balance( lender, unlent ); - } - else if( sym == exstate.quote.balance.get_extended_symbol() ) { - extended_asset unlent = exstate.quote.peer_margin.unlend( ishares ); - _accounts.adjust_balance( lender, unlent ); - } - else eosio_assert( false, "unable to lend to this market" ); - } - - - - void market_state::adjust_lend_shares( account_name lender, loans& l, double delta ) { - auto existing = l.find( lender ); - if( existing == l.end() ) { - l.emplace( lender, [&]( auto& obj ) { - obj.owner = lender; - obj.interest_shares = delta; - eosio_assert( delta >= 0, "underflow" ); - }); - } else { - l.modify( existing, 0, [&]( auto& obj ) { - obj.interest_shares += delta; - eosio_assert( obj.interest_shares >= 0, "underflow" ); - }); - } - } - - void market_state::cover_margin( account_name borrower, const extended_asset& cover_amount ) { - if( cover_amount.get_extended_symbol() == exstate.base.balance.get_extended_symbol() ) { - cover_margin( borrower, base_margins, exstate.base, cover_amount ); - } else if( cover_amount.get_extended_symbol() == exstate.quote.balance.get_extended_symbol() ) { - cover_margin( borrower, quote_margins, exstate.quote, cover_amount ); - } else { - eosio_assert( false, "invalid debt asset" ); - } - } - - - /** - * This method will use the collateral to buy the borrowed asset from the market - * with collateral to cancel the debt. 
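- *
- * In outline (following the code below): the collateral needed is first
- * estimated by converting cover_amount on a scratch copy of the state; that
- * estimate is then sold for real, any overshoot (debpaid - cover_amount) is
- * converted back and refunded, and the position is either erased when fully
- * covered or updated with call_price = borrowed / collateral, refreshing the
- * connector's least_collateralized watermark either way.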
- */ - void market_state::cover_margin( account_name borrower, margins& m, exchange_state::connector& c, - const extended_asset& cover_amount ) - { - auto existing = m.find( borrower ); - eosio_assert( existing != m.end(), "no known margin position" ); - eosio_assert( existing->borrowed.amount >= cover_amount.amount, "attempt to cover more than user has" ); - - auto tmp = exstate; - auto estcol = tmp.convert( cover_amount, existing->collateral.get_extended_symbol() ); - auto debpaid = exstate.convert( estcol, cover_amount.get_extended_symbol() ); - eosio_assert( debpaid.amount >= cover_amount.amount, "unable to cover debt" ); - - auto refundcover = debpaid - cover_amount; - - auto refundcol = exstate.convert( refundcover, existing->collateral.get_extended_symbol() ); - estcol.amount -= refundcol.amount; - - if( existing->borrowed.amount == cover_amount.amount ) { - auto freedcollateral = existing->collateral - estcol; - m.erase( existing ); - existing = m.begin(); - _accounts.adjust_balance( borrower, freedcollateral ); - } - else { - m.modify( existing, 0, [&]( auto& obj ) { - obj.collateral.amount -= estcol.amount; - obj.borrowed.amount -= cover_amount.amount; - obj.call_price = double(obj.borrowed.amount) / obj.collateral.amount; - }); - } - c.peer_margin.total_lent.amount -= cover_amount.amount; - - if( existing != m.end() ) { - if( existing->call_price < c.peer_margin.least_collateralized ) - c.peer_margin.least_collateralized = existing->call_price; - } else { - c.peer_margin.least_collateralized = std::numeric_limits::max(); - } - } - - void market_state::update_margin( account_name borrower, const extended_asset& delta_debt, const extended_asset& delta_col ) - { - if( delta_debt.get_extended_symbol() == exstate.base.balance.get_extended_symbol() ) { - adjust_margin( borrower, base_margins, exstate.base, delta_debt, delta_col ); - } else if( delta_debt.get_extended_symbol() == exstate.quote.balance.get_extended_symbol() ) { - adjust_margin( borrower, quote_margins, exstate.quote, delta_debt, delta_col ); - } else { - eosio_assert( false, "invalid debt asset" ); - } - } - - void market_state::adjust_margin( account_name borrower, margins& m, exchange_state::connector& c, - const extended_asset& delta_debt, const extended_asset& delta_col ) - { - auto existing = m.find( borrower ); - if( existing == m.end() ) { - eosio_assert( delta_debt.amount > 0, "cannot borrow neg" ); - eosio_assert( delta_col.amount > 0, "cannot have neg collat" ); - - existing = m.emplace( borrower, [&]( auto& obj ) { - obj.owner = borrower; - obj.borrowed = delta_debt; - obj.collateral = delta_col; - obj.call_price = double(obj.borrowed.amount) / obj.collateral.amount; - }); - } else { - if( existing->borrowed.amount == -delta_debt.amount ) { - eosio_assert( existing->collateral.amount == -delta_col.amount, "user failed to claim all collateral" ); - - m.erase( existing ); - existing = m.begin(); - } else { - m.modify( existing, 0, [&]( auto& obj ) { - obj.borrowed += delta_debt; - obj.collateral += delta_col; - obj.call_price = double(obj.borrowed.amount) / obj.collateral.amount; - }); - } - } - - c.peer_margin.total_lent += delta_debt; - eosio_assert( c.peer_margin.total_lent.amount <= c.peer_margin.total_lendable.amount, "insufficient funds availalbe to borrow" ); - - if( existing != m.end() ) { - if( existing->call_price < c.peer_margin.least_collateralized ) - c.peer_margin.least_collateralized = existing->call_price; - - eosio_assert( !exstate.requires_margin_call( c ), "this update would trigger a margin 
call" ); - } else { - c.peer_margin.least_collateralized = std::numeric_limits::max(); - } - - } - - - - void market_state::save() { - market_table.modify( market_state_itr, 0, [&]( auto& s ) { - s = exstate; - }); - } - -} diff --git a/contracts/exchange/market_state.hpp b/contracts/exchange/market_state.hpp deleted file mode 100644 index e145ef61cc1..00000000000 --- a/contracts/exchange/market_state.hpp +++ /dev/null @@ -1,77 +0,0 @@ -#pragma once -#include -#include - -namespace eosio { - - /** - * We calculate a unique scope for each market/borrowed_symbol/collateral_symbol and then - * instantiate a table of margin positions... with in this table each user has exactly - * one position and therefore the owner can serve as the primary key. - */ - struct margin_position { - account_name owner; - extended_asset borrowed; - extended_asset collateral; - double call_price = 0; - - uint64_t get_call()const { return uint64_t(1000000*call_price); } - uint64_t primary_key()const { return owner; } - - EOSLIB_SERIALIZE( margin_position, (owner)(borrowed)(collateral)(call_price) ) - }; - - typedef eosio::multi_index > - > margins; - - - struct loan_position { - account_name owner; /// the owner - double interest_shares; /// the number of shares in the total lent pool - - uint64_t primary_key()const { return owner; } - - EOSLIB_SERIALIZE( loan_position, (owner)(interest_shares) ) - }; - - typedef eosio::multi_index loans; - - /** - * Maintains a state along with the cache of margin positions and/or limit orders. - */ - struct market_state { - market_state( account_name this_contract, symbol_type market_symbol, exchange_accounts& acnts ); - - const exchange_state& initial_state()const; - void margin_call( extended_symbol debt_type ); - void lend( account_name lender, const extended_asset& debt ); - void unlend( account_name lender, double ishares, const extended_symbol& sym ); - void update_margin( account_name borrower, const extended_asset& delta_debt, - const extended_asset& delta_collateral ); - void cover_margin( account_name borrower, const extended_asset& cover_amount ); - - void save(); - - symbol_name marketid; - exchange_state exstate; - - markets market_table; - margins base_margins; - margins quote_margins; - loans base_loans; - loans quote_loans; - - private: - exchange_accounts& _accounts; - markets::const_iterator market_state_itr; - - void cover_margin( account_name borrower, margins& m, exchange_state::connector& c, - const extended_asset& cover_amount ); - void adjust_margin( account_name borrower, margins& m, exchange_state::connector& c, - const extended_asset& delta_debt, const extended_asset& delta_col ); - void adjust_lend_shares( account_name lender, loans& l, double delta ); - void margin_call( exchange_state::connector& c, margins& m ); - }; - -} /// namespace eosio diff --git a/contracts/exchange/test_exchange.cpp b/contracts/exchange/test_exchange.cpp deleted file mode 100644 index a6a1f9dae96..00000000000 --- a/contracts/exchange/test_exchange.cpp +++ /dev/null @@ -1,518 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -/* -#include -#include -#include -#include - -#include -#include "fixed.hpp" -*/ - -#include -#include - -//#include "bfp/lib/posit.h" - -using namespace std; - -typedef long double real_type; -typedef double token_type; - - -/* -struct margin_position { - account_name owner; - uint64_t exchange_id; - asset lent; - asset collateral; - uint64_t open_time; - - uint64_t primary_key()const{ return owner; } - uint256_t 
by_owner_ex_lent_collateral()const { - - } - - real_type by_call_price()const { - return collateral.amount / real_type(lent.amount); - } -}; -*/ - - - -template -Real Abs(Real Nbr) -{ - if( Nbr >= 0 ) - return Nbr; - else - return -Nbr; -} - -template -Real sqrt_safe( const Real Nbr) -{ - return sqrt(Nbr); -// cout << " " << Nbr << "\n";; - Real Number = Nbr / Real(2.0); - const Real Tolerance = Real(double(1.0e-12)); - //cout << "tol: " << Tolerance << "\n"; - - Real Sq; - Real Er; - do { - auto tmp = Nbr / Number; - tmp += Number; - tmp /= real_type(2.0); - if( Number == tmp ) break; - Number = tmp; - Sq = Number * Number; - Er = Abs(Sq - Nbr); -// wdump((Er.getDouble())(1.0e-8)(Tolerance.getDouble())); -// wdump(((Er - Tolerance).getDouble())); - }while( Er >= Tolerance ); - - return Number; -} - -typedef __uint128_t uint128_t; -typedef string account_name; -typedef string symbol_type; - -static const symbol_type exchange_symbol = "EXC"; - -struct asset { - token_type amount; - symbol_type symbol; -}; - -struct margin_key { - symbol_type lent; - symbol_type collat; -}; - -struct margin { - asset lent; - symbol_type collateral_symbol; - real_type least_collateralized_rate; -}; - -struct user_margin { - asset lent; - asset collateral; - - real_type call_price()const { - return collateral.amount / real_type(lent.amount); - } -}; - -struct exchange_state; -struct connector { - asset balance; - real_type weight = 0.5; - token_type total_lent; /// lent from maker to users - token_type total_borrowed; /// borrowed from users to maker - token_type total_available_to_lend; /// amount available to borrow - token_type interest_pool; /// total interest earned but not claimed, - /// each user can claim user_lent - - void borrow( exchange_state& ex, const asset& amount_to_borrow ); - asset convert_to_exchange( exchange_state& ex, const asset& input ); - asset convert_from_exchange( exchange_state& ex, const asset& input ); -}; - - -struct balance_key { - account_name owner; - symbol_type symbol; - - friend bool operator < ( const balance_key& a, const balance_key& b ) { - return std::tie( a.owner, a.symbol ) < std::tie( b.owner, b.symbol ); - } - friend bool operator == ( const balance_key& a, const balance_key& b ) { - return std::tie( a.owner, a.symbol ) == std::tie( b.owner, b.symbol ); - } -}; - -real_type fee = 1;//.9995; - - -int64_t maxtrade = 20000ll; - -struct exchange_state { - token_type supply; - symbol_type symbol = exchange_symbol; - - connector base; - connector quote; - - void transfer( account_name user, asset q ) { - output[balance_key{user,q.symbol}] += q.amount; - } - map output; - vector margins; -}; - -/* -void connector::borrow( exchange_state& ex, account_name user, - asset amount_to_borrow, - asset collateral, - user_margin& marg ) { - FC_ASSERT( amount_to_borrow.amount < balance.amount, "attempt to borrow too much" ); - lent.amount += amount_to_borrow.amount; - balance.amount -= amount_to_borrow.amount; - ex.transfer( user, amount_to_borrow ); - - marg.collateral.amount += collateral.amount; - marg.lent.amount += amount_to_borrow.amount; - auto p = marg.price(); - - if( collateral.symbol == ex.symbol ) { - if( p > ex_margin.least_collateralized_rate ) - ex_margin.least_collateralized_rate = p; - } - else if( collateral.symbol == peer_margin.collateral.symbol ) { - if( p > peer_margin.least_collateralized_rate ) - peer_margin.least_collateralized_rate = p; - } -} -*/ - -asset connector::convert_to_exchange( exchange_state& ex, const asset& input ) { - - real_type 
R(ex.supply); - real_type S(balance.amount+input.amount); - real_type F(weight); - real_type T(input.amount); - real_type ONE(1.0); - - auto E = R * (ONE - std::pow( ONE + T / S, F) ); - - - //auto real_issued = real_type(ex.supply) * (sqrt_safe( 1.0 + (real_type(input.amount) / (balance.amount+input.amount))) - 1.0); - //auto real_issued = real_type(ex.supply) * (std::pow( 1.0 + (real_type(input.amount) / (balance.amount+input.amount)), weight) - real_type(1.0)); - //auto real_issued = R * (std::pow( ONE + (T / S), F) - ONE); - - //wdump((double(E))(double(real_issued))); - token_type issued = -E; //real_issued; - - - ex.supply += issued; - balance.amount += input.amount; - - return asset{ issued, exchange_symbol }; -} - -asset connector::convert_from_exchange( exchange_state& ex, const asset& input ) { - - real_type R(ex.supply - input.amount); - real_type S(balance.amount); - real_type F(weight); - real_type E(input.amount); - real_type ONE(1.0); - - real_type T = S * (std::pow( ONE + E/R, ONE/F) - ONE); - - - /* - real_type base = real_type(1.0) + ( real_type(input.amount) / real_type(ex.supply-input.amount)); - auto out = (balance.amount * ( std::pow(base,1.0/weight) - real_type(1.0) )); - */ - auto out = T; - -// edump((double(out-T))(double(out))(double(T))); - - ex.supply -= input.amount; - balance.amount -= token_type(out); - return asset{ token_type(out), balance.symbol }; -} - - -void eosio_assert( bool test, const string& msg ) { - if( !test ) throw std::runtime_error( msg ); -} - -void print_state( const exchange_state& e ); - - - -/** - * Given the current state, calculate the new state - */ -exchange_state convert( const exchange_state& current, - account_name user, - asset input, - asset min_output, - asset* out = nullptr) { - - eosio_assert( min_output.symbol != input.symbol, "cannot convert" ); - - exchange_state result(current); - - asset initial_output = input; - - if( input.symbol != exchange_symbol ) { - if( input.symbol == result.base.balance.symbol ) { - initial_output = result.base.convert_to_exchange( result, input ); - } - else if( input.symbol == result.quote.balance.symbol ) { - initial_output = result.quote.convert_to_exchange( result, input ); - } - else eosio_assert( false, "invalid symbol" ); - } else { - if( min_output.symbol == result.base.balance.symbol ) { - initial_output = result.base.convert_from_exchange( result, initial_output ); - } - else if( min_output.symbol == result.quote.balance.symbol ) { - initial_output= result.quote.convert_from_exchange( result, initial_output ); - } - else eosio_assert( false, "invalid symbol" ); - } - - - - asset final_output = initial_output; - -// std::cerr << "\n\nconvert " << input.amount << " "<< input.symbol << " => " << final_output.amount << " " << final_output.symbol << " final: " << min_output.symbol << " \n"; - - result.output[ balance_key{user,final_output.symbol} ] += final_output.amount; - result.output[ balance_key{user,input.symbol} ] -= input.amount; - - if( min_output.symbol != final_output.symbol ) { - return convert( result, user, final_output, min_output, out ); - } - - if( out ) *out = final_output; - return result; -} - -/* VALIDATE MARGIN ALGORITHM - * - * Given an initial condition, verify that all margin positions can be filled. 
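- *
- * Concretely, the sketch below deems a position fillable when converting
- * min_collat = lent.amount * least_collateralized_rate of collateral back
- * through the exchange yields more than the lent amount; validate_margin
- * asserts exactly that for every open position.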
- *
- * Assume 3 assets, A, B, and X; with the notation LENT-COLLAT we get the following
- * pairs:
- *
- *    B-X
- *    B-A
- *    A-X
- *    A-B
- *    X-A
- *    X-B
- *
- * We assume that pairs of the same lent-type have to be simultaneously filled,
- * as filling one could make it impossible to fill the other.
- *
- *
-void validate_margin( exchange_state& e ) {
-   for( const auto& pos : e.margins ) {
-      token_type min_collat = pos.lent.amount * pos.least_collateralized_rate;
-      asset received;
-      e = convert( e, "user", asset{ min_collat, pos.first.collat }, pos.lent, &received );
-      FC_ASSERT( received > pos.lent.amount, "insufficient collateral" );
-
-      received.amount -= pos.lent.amount;
-      e = convert( e, "user", received, asset{ token_type(0), pos.collateral_symbol} );
-   }
-}
-*/
-
-
-
-
-
-/**
- *  A user has Collateral C and wishes to borrow B, so we give user B
- *  provided that C is enough to buy B back after removing it from market and
- *  that no margin calls would be triggered.
- */
-exchange_state borrow( const exchange_state& current, account_name user,
                       asset amount_to_borrow,
-                       asset collateral_provided ) {
-   FC_ASSERT( amount_to_borrow.symbol != collateral_provided.symbol );
-
-   /// lookup the margin position for user
-   /// update user's margin position
-   /// update least collateralized margin position on state
-   /// remove amount_to_borrow from exchange
-   /// lock collateral for user
-   /// simulate complete margin calls
-   return exchange_state();
-}
-
-exchange_state cover( const exchange_state& current, account_name user,
-                      asset amount_to_cover, asset collateral_to_cover_with )
-{
-   /// lookup existing position for user/debt/collat
-   /// verify collat > collateral_to_cover_with
-   /// sell collateral_to_cover_with for debt on market
-   /// reduce debt by proceeds
-   /// add proceeds to connector
-   //    - if borrowed from user, reduce borrowed from user
-   /// calculate new call price and update least collateralized position
-   /// simulate complete margin calls
-   return exchange_state();
-}
-
-exchange_state lend( const exchange_state& current, account_name lender,
-                     asset asset_to_lend ) {
-   /// add to pool of funds available for lending and buy SHARES in
-   /// interest pool at current rate.
-   return exchange_state();
-}
-
-exchange_state unlend( const exchange_state& current, account_name lender,
-                       asset asset_to_lend ) {
-   /// sell shares in interest pool at current rate
-   /// this is permissible so long as total borrowed from users remains less than
-   /// total available to lend. Otherwise, margin is called on the least
-   /// collateralized position.
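-   /// e.g. with hypothetical numbers: total_lendable = 1000 and
-   /// interest_shares = 200, unlending ishares = 50 pays out
-   /// 50 * 1000 / 200 = 250 tokens (cf. margin_state::unlend above)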
- return exchange_state(); -} - - - -void print_state( const exchange_state& e ) { - std::cerr << "\n-----------------------------\n"; - std::cerr << "supply: " << e.supply << "\n"; - std::cerr << "base: " << e.base.balance.amount << " " << e.base.balance.symbol << "\n"; - std::cerr << "quote: " << e.quote.balance.amount << " " << e.quote.balance.symbol << "\n"; - - for( const auto& item : e.output ) { - cerr << item.first.owner << " " << item.second << " " << item.first.symbol << "\n"; - } - std::cerr << "\n-----------------------------\n"; -} - - -int main( int argc, char** argv ) { - // std::cerr << "root: " << double(root.numerator())/root.denominator() << "\n"; - - - exchange_state state; - state.supply = 100000000000ll; - //state.base.weight = state.total_weight / 2.; - state.base.balance.amount = 100000000; - state.base.balance.symbol = "USD"; - state.base.weight = .49; - //state.quote.weight = state.total_weight / 2.; - state.quote.balance.amount = state.base.balance.amount; - state.quote.balance.symbol = "BTC"; - state.quote.weight = .51; - - print_state( state ); - - //state = convert( state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - - auto start = fc::time_point::now(); - for( uint32_t i = 0; i < 10000; ++i ) { - if( rand() % 2 == 0 ) - state = convert( state, "dan", asset{ token_type(uint32_t(rand())%maxtrade), "USD"}, asset{ 0, "BTC" } ); - else - state = convert( state, "dan", asset{ token_type(uint32_t(rand())%maxtrade), "BTC"}, asset{ 0, "USD" } ); - } - for( const auto& item : state.output ) { - if( item.second > 0 ) { - if( item.first.symbol == "USD" ) - state = convert( state, "dan", asset{ item.second, item.first.symbol}, asset{ 0, "BTC" } ); - else - state = convert( state, "dan", asset{ item.second, item.first.symbol}, asset{ 0, "USD" } ); - break; - } - } - print_state( state ); - - auto end = fc::time_point::now(); - wdump((end-start)); - /* - auto new_state = convert( state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - - new_state = convert( new_state, "dan", asset{ 100, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 100, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 100, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 100, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 100, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 100, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 100, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 92.5-0.08-.53, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 100, "BTC"}, asset{ 0, "USD" } ); - */ - - //new_state = convert( new_state, "dan", asset{ 442+487-733+280+349+4.493+62.9, "BTC"}, asset{ 0, "USD" } ); - /* - auto new_state = convert( state, 
"dan", asset{ 500, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 500, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 442+487, "BTC"}, asset{ 0, "USD" } ); - */ - /* - new_state = convert( new_state, "dan", asset{ 487, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 442, "BTC"}, asset{ 0, "USD" } ); - */ - //new_state = convert( new_state, "dan", asset{ 526, "BTC"}, asset{ 0, "USD" } ); - //new_state = convert( new_state, "dan", asset{ 558, "BTC"}, asset{ 0, "USD" } ); - //new_state = convert( new_state, "dan", asset{ 1746, "BTC"}, asset{ 0, "USD" } ); - /* - new_state = convert( new_state, "dan", asset{ 526, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 500, "USD"}, asset{ 0, "EXC" } ); - new_state = convert( new_state, "dan", asset{ 500, "BTC"}, asset{ 0, "EXC" } ); - new_state = convert( new_state, "dan", asset{ 10, "EXC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 10, "EXC"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 500, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 500, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 500, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 500, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 2613, "BTC"}, asset{ 0, "USD" } ); - */ - - - - /* - auto new_state = convert( state, "dan", asset{ 10, "EXC"}, asset{ 0, "USD" } ); - - print_state( new_state ); - - new_state = convert( state, "dan", asset{ 10, "EXC"}, asset{ 0, "BTC" } ); - print_state( new_state ); - new_state = convert( new_state, "dan", asset{ 10, "EXC"}, asset{ 0, "USD" } ); - print_state( new_state ); - - - //new_state = convert( new_state, "dan", asset{ 52, "USD"}, asset{ 0, "EXC" } ); - */ - - return 0; -} - - - -#if 0 - -0. if( margin_fault ) - Convert Least Collateral - if( margin fault )) - defer - -if( margin_fault ) assert( false, "busy calling" ); - -1. Fill Incoming Order -2. Check Counter Order -3. if( margin fault ) - Defer Trx to finish margin call - - -#endif diff --git a/contracts/identity/test/identity_test.abi b/contracts/identity/test/identity_test.abi index 938d5ef2c8a..a0b450f8ac4 100644 --- a/contracts/identity/test/identity_test.abi +++ b/contracts/identity/test/identity_test.abi @@ -1,4 +1,5 @@ { + "version": "eosio::abi/1.0", "types": [{ "new_type_name": "account_name", "type": "name" diff --git a/contracts/payloadless/payloadless.abi b/contracts/payloadless/payloadless.abi index c68563b7c14..1ea79c6f275 100644 --- a/contracts/payloadless/payloadless.abi +++ b/contracts/payloadless/payloadless.abi @@ -1,4 +1,5 @@ { + "version": "eosio::abi/1.0", "____comment": "This file was generated by eosio-abigen. 
DO NOT EDIT - 2018-04-19T09:07:16", "types": [], "structs": [{ diff --git a/contracts/test_api/test_action.cpp b/contracts/test_api/test_action.cpp index 1bc54cb3d02..496b30fcc37 100644 --- a/contracts/test_api/test_action.cpp +++ b/contracts/test_api/test_action.cpp @@ -174,6 +174,17 @@ void test_action::require_notice(uint64_t receiver, uint64_t code, uint64_t acti eosio_assert(false, "Should've failed"); } +void test_action::require_notice_tests(uint64_t receiver, uint64_t code, uint64_t action) { + eosio::print( "require_notice_tests" ); + if( receiver == N( testapi ) ) { + eosio::print( "require_recipient( N(acc5) )" ); + eosio::require_recipient( N( acc5 ) ); + } else if( receiver == N( acc5 ) ) { + eosio::print( "require_recipient( N(testapi) )" ); + eosio::require_recipient( N( testapi ) ); + } +} + void test_action::require_auth() { prints("require_auth"); eosio::require_auth( N(acc3) ); diff --git a/contracts/test_api/test_api.cpp b/contracts/test_api/test_api.cpp index 8f1922f2d4f..9ddcb712752 100644 --- a/contracts/test_api/test_api.cpp +++ b/contracts/test_api/test_api.cpp @@ -71,6 +71,7 @@ extern "C" { WASM_TEST_HANDLER(test_action, read_action_to_0); WASM_TEST_HANDLER(test_action, read_action_to_64k); WASM_TEST_HANDLER_EX(test_action, require_notice); + WASM_TEST_HANDLER_EX(test_action, require_notice_tests); WASM_TEST_HANDLER(test_action, require_auth); WASM_TEST_HANDLER(test_action, assert_false); WASM_TEST_HANDLER(test_action, assert_true); diff --git a/contracts/test_api/test_api.hpp b/contracts/test_api/test_api.hpp index a8fa3f21d65..4dc5711e3a1 100644 --- a/contracts/test_api/test_api.hpp +++ b/contracts/test_api/test_api.hpp @@ -63,6 +63,7 @@ struct test_action { static void test_dummy_action(); static void test_cf_action(); static void require_notice(uint64_t receiver, uint64_t code, uint64_t action); + static void require_notice_tests(uint64_t receiver, uint64_t code, uint64_t action); static void require_auth(); static void assert_false(); static void assert_true(); diff --git a/contracts/test_ram_limit/test_ram_limit.abi b/contracts/test_ram_limit/test_ram_limit.abi index 366fa0d064c..9d3413b8b8e 100644 --- a/contracts/test_ram_limit/test_ram_limit.abi +++ b/contracts/test_ram_limit/test_ram_limit.abi @@ -1,5 +1,6 @@ { "____comment": "This file was generated by eosio-abigen. 
DO NOT EDIT - 2018-03-29T02:09:11", + "version": "eosio::abi/1.0", "types": [{ "new_type_name": "account_name", "type": "name" diff --git a/eosio_build.sh b/eosio_build.sh index 59c76b0b54e..1c32a5ec86f 100755 --- a/eosio_build.sh +++ b/eosio_build.sh @@ -120,7 +120,7 @@ pushd "${SOURCE_DIR}" &> /dev/null - STALE_SUBMODS=$(( $(git submodule status | grep -c "^[+\-]") )) + STALE_SUBMODS=$(( $(git submodule status --recursive | grep -c "^[+\-]") )) if [ $STALE_SUBMODS -gt 0 ]; then printf "\\n\\tgit submodules are not up to date.\\n" printf "\\tPlease run the command 'git submodule update --init --recursive'.\\n" diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index a36e79cf5d2..78593706c7d 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -8,3 +8,9 @@ add_subdirectory( appbase ) add_subdirectory( chain ) add_subdirectory( testing ) add_subdirectory( abi_generator ) + +#turn these off for now +set(BUILD_TESTS OFF CACHE BOOL "Build GTest-based tests") +set(BUILD_TOOLS OFF CACHE BOOL "Build wabt tools") +set(RUN_RE2C OFF CACHE BOOL "Run re2c") +add_subdirectory( wabt ) diff --git a/libraries/appbase b/libraries/appbase index fa0e7fd9aa8..6e440a7f3c5 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit fa0e7fd9aa8be6ddc0c2f620cae63e58fefafab2 +Subproject commit 6e440a7f3c51f3b8226860663b5eb6446087fed9 diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index af540a38eca..cf4c1be184d 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -38,6 +38,7 @@ add_library( eosio_chain webassembly/wavm.cpp webassembly/binaryen.cpp + webassembly/wabt.cpp # get_config.cpp # global_property_object.cpp @@ -50,12 +51,14 @@ add_library( eosio_chain ) target_link_libraries( eosio_chain eos_utilities fc chainbase Logging IR WAST WASM Runtime - wasm asmjs passes cfg ast emscripten-optimizer support softfloat builtins + wasm asmjs passes cfg ast emscripten-optimizer support softfloat builtins wabt ) target_include_directories( eosio_chain PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/../wasm-jit/Include" "${CMAKE_CURRENT_SOURCE_DIR}/../../externals/binaryen/src" + "${CMAKE_SOURCE_DIR}/libraries/wabt" + "${CMAKE_BINARY_DIR}/libraries/wabt" ) install( TARGETS eosio_chain diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index 60303a5268f..1c654ddc4d8 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -105,11 +105,14 @@ namespace eosio { namespace chain { void abi_serializer::set_abi(const abi_def& abi, const fc::microseconds& max_serialization_time) { const fc::time_point deadline = fc::time_point::now() + max_serialization_time; + EOS_ASSERT(starts_with(abi.version, "eosio::abi/1."), unsupported_abi_version_exception, "ABI has an unsupported version"); + typedefs.clear(); structs.clear(); actions.clear(); tables.clear(); error_messages.clear(); + variants.clear(); for( const auto& st : abi.structs ) structs[st.name] = st; @@ -129,6 +132,9 @@ namespace eosio { namespace chain { for( const auto& e : abi.error_messages ) error_messages[e.error_code] = e.error_msg; + for( const auto& v : abi.variants.value ) + variants[v.name] = v; + /** * The ABI vector may contain duplicates which would make it * an invalid ABI @@ -138,6 +144,7 @@ namespace eosio { namespace chain { EOS_ASSERT( actions.size() == abi.actions.size(), duplicate_abi_action_def_exception, 
"duplicate action definition detected" ); EOS_ASSERT( tables.size() == abi.tables.size(), duplicate_abi_table_def_exception, "duplicate table definition detected" ); EOS_ASSERT( error_messages.size() == abi.error_messages.size(), duplicate_abi_err_msg_def_exception, "duplicate error message definition detected" ); + EOS_ASSERT( variants.size() == abi.variants.value.size(), duplicate_abi_variant_def_exception, "duplicate variant definition detected" ); validate(deadline, max_serialization_time); } @@ -183,6 +190,13 @@ namespace eosio { namespace chain { } } + type_name abi_serializer::_remove_bin_extension(const type_name& type) { + if( ends_with(type, "$") ) + return type.substr(0, type.size()-1); + else + return type; + } + bool abi_serializer::_is_type(const type_name& rtype, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const { EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); if( ++recursion_depth > max_recursion_depth) return false; @@ -190,6 +204,7 @@ namespace eosio { namespace chain { if( built_in_types.find(type) != built_in_types.end() ) return true; if( typedefs.find(type) != typedefs.end() ) return _is_type(typedefs.find(type)->second, recursion_depth, deadline, max_serialization_time); if( structs.find(type) != structs.end() ) return true; + if( variants.find(type) != variants.end() ) return true; return false; } @@ -227,9 +242,15 @@ namespace eosio { namespace chain { } for( const auto& field : s.second.fields ) { try { EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); - EOS_ASSERT(_is_type(field.type, 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",field.type) ); + EOS_ASSERT(_is_type(_remove_bin_extension(field.type), 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",field.type) ); } FC_CAPTURE_AND_RETHROW( (field) ) } } FC_CAPTURE_AND_RETHROW( (s) ) } + for( const auto& s : variants ) { try { + for( const auto& type : s.second.types ) { try { + EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + EOS_ASSERT(_is_type(type, 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",type) ); + } FC_CAPTURE_AND_RETHROW( (type) ) } + } FC_CAPTURE_AND_RETHROW( (s) ) } for( const auto& a : actions ) { try { EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); EOS_ASSERT(_is_type(a.second, 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",a.second) ); @@ -264,7 +285,9 @@ namespace eosio { namespace chain { _binary_to_variant(resolve_type(st.base), stream, obj, recursion_depth, deadline, max_serialization_time); } for( const auto& field : st.fields ) { - obj( field.name, _binary_to_variant(resolve_type(field.type), stream, recursion_depth, deadline, max_serialization_time) ); + if( !stream.remaining() && ends_with(field.type, "$") ) + continue; + obj( field.name, _binary_to_variant(resolve_type(_remove_bin_extension(field.type)), stream, recursion_depth, deadline, max_serialization_time) ); } } @@ -289,7 +312,7 @@ namespace eosio { namespace chain { vars.emplace_back(std::move(v)); } EOS_ASSERT( 
vars.size() == size.value, - unpack_exception, + unpack_exception, "packed size does not match unpacked array size, packed size ${p} actual size ${a}", ("p", size)("a", vars.size()) ); return fc::variant( std::move(vars) ); @@ -297,6 +320,14 @@ namespace eosio { namespace chain { char flag; fc::raw::unpack(stream, flag); return flag ? _binary_to_variant(ftype, stream, recursion_depth, deadline, max_serialization_time) : fc::variant(); + } else { + auto v = variants.find(rtype); + if( v != variants.end() ) { + fc::unsigned_int select; + fc::raw::unpack(stream, select); + EOS_ASSERT( (size_t)select < v->second.types.size(), unpack_exception, "Invalid packed variant" ); + return vector{v->second.types[select], _binary_to_variant(v->second.types[select], stream, recursion_depth, deadline, max_serialization_time)}; + } } fc::mutable_variant_object mvo; @@ -314,7 +345,7 @@ namespace eosio { namespace chain { return _binary_to_variant(type, ds, recursion_depth, deadline, max_serialization_time); } - void abi_serializer::_variant_to_binary( const type_name& type, const fc::variant& var, fc::datastream& ds, + void abi_serializer::_variant_to_binary( const type_name& type, const fc::variant& var, fc::datastream& ds, bool allow_extensions, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time )const { try { EOS_ASSERT( ++recursion_depth < max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) ); @@ -328,8 +359,15 @@ namespace eosio { namespace chain { vector vars = var.get_array(); fc::raw::pack(ds, (fc::unsigned_int)vars.size()); for (const auto& var : vars) { - _variant_to_binary(fundamental_type(rtype), var, ds, recursion_depth, deadline, max_serialization_time); + _variant_to_binary(fundamental_type(rtype), var, ds, false, recursion_depth, deadline, max_serialization_time); } + } else if ( variants.find(rtype) != variants.end() ) { + EOS_ASSERT( var.is_array() && var.size() == 2 && var[size_t(0)].is_string(), abi_exception, "expected array containing variant" ); + auto& v = variants.find(rtype)->second; + auto it = find(v.types.begin(), v.types.end(), var[size_t(0)].get_string()); + EOS_ASSERT( it != v.types.end(), abi_exception, "type is not valid within this variant" ); + fc::raw::pack(ds, fc::unsigned_int(it - v.types.begin())); + _variant_to_binary( *it, var[size_t(1)], ds, allow_extensions, recursion_depth, deadline, max_serialization_time ); } else { const auto& st = get_struct(rtype); @@ -337,15 +375,17 @@ namespace eosio { namespace chain { const auto& vo = var.get_object(); if( st.base != type_name() ) { - _variant_to_binary(resolve_type(st.base), var, ds, recursion_depth, deadline, max_serialization_time); + _variant_to_binary(resolve_type(st.base), var, ds, false, recursion_depth, deadline, max_serialization_time); } + bool missing_extension = false; for( const auto& field : st.fields ) { if( vo.contains( string(field.name).c_str() ) ) { - _variant_to_binary(field.type, vo[field.name], ds, recursion_depth, deadline, max_serialization_time); - } - else { - _variant_to_binary(field.type, fc::variant(), ds, recursion_depth, deadline, max_serialization_time); - /// TODO: default construct field and write it out + if( missing_extension ) + EOS_THROW( pack_exception, "Unexpected '${f}' in variant object", ("f",field.name) ); + _variant_to_binary(_remove_bin_extension(field.type), vo[field.name], ds, allow_extensions && &field == &st.fields.back(), recursion_depth, 
deadline, max_serialization_time); + } else if( ends_with(field.type, "$") && allow_extensions ) { + missing_extension = true; + } else { EOS_THROW( pack_exception, "Missing '${f}' in variant object", ("f",field.name) ); } } @@ -353,20 +393,23 @@ namespace eosio { namespace chain { const auto& va = var.get_array(); EOS_ASSERT( st.base == type_name(), invalid_type_inside_abi, "support for base class as array not yet implemented" ); uint32_t i = 0; - if (va.size() > 0) { - for( const auto& field : st.fields ) { - if( va.size() > i ) - _variant_to_binary(field.type, va[i], ds, recursion_depth, deadline, max_serialization_time); - else - _variant_to_binary(field.type, fc::variant(), ds, recursion_depth, deadline, max_serialization_time); - ++i; - } + for( const auto& field : st.fields ) { + if( va.size() > i ) + _variant_to_binary(_remove_bin_extension(field.type), va[i], ds, allow_extensions && &field == &st.fields.back(), recursion_depth, deadline, max_serialization_time); + else if( ends_with(field.type, "$") && allow_extensions ) + break; + else + EOS_THROW( pack_exception, "Early end to array specifying the fields of struct '${t}'; require input for field '${f}'", + ("t", st.name)("f", field.name) ); + ++i; } + } else { + EOS_THROW( pack_exception, "Failed to serialize struct '${t}' in variant object", ("t", st.name)); } } } FC_CAPTURE_AND_RETHROW( (type)(var) ) } - bytes abi_serializer::_variant_to_binary( const type_name& type, const fc::variant& var, + bytes abi_serializer::_variant_to_binary( const type_name& type, const fc::variant& var, bool allow_extensions, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time )const { try { EOS_ASSERT( ++recursion_depth < max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) ); @@ -377,7 +420,7 @@ namespace eosio { namespace chain { bytes temp( 1024*1024 ); fc::datastream ds(temp.data(), temp.size() ); - _variant_to_binary(type, var, ds, recursion_depth, deadline, max_serialization_time); + _variant_to_binary(type, var, ds, allow_extensions, recursion_depth, deadline, max_serialization_time); temp.resize(ds.tellp()); return temp; } FC_CAPTURE_AND_RETHROW( (type)(var) ) } diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 0bbfb8e3aec..f7513debf62 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -76,7 +76,13 @@ action_trace apply_context::exec_one() action_trace t(r); t.trx_id = trx_context.id; + t.block_num = control.pending_block_state()->block_num; + t.block_time = control.pending_block_time(); + t.producer_block_id = control.pending_producer_block_id(); + t.account_ram_deltas = std::move( _account_ram_deltas ); + _account_ram_deltas.clear(); t.act = act; + t.context_free = context_free; t.console = _pending_console_output.str(); trx_context.executed.emplace_back( move(r) ); @@ -102,7 +108,7 @@ void apply_context::exec() if( _cfa_inline_actions.size() > 0 || _inline_actions.size() > 0 ) { EOS_ASSERT( recurse_depth < control.get_global_properties().configuration.max_inline_action_depth, - transaction_exception, "inline action recursion depth reached" ); + transaction_exception, "max inline action depth per transaction reached" ); } for( const auto& inline_action : _cfa_inline_actions ) { @@ -275,7 +281,7 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a "Replacing a deferred transaction is temporarily disabled." 
); // TODO: The logic of the next line needs to be incorporated into the next hard fork. - // trx_context.add_ram_usage( ptr->payer, -(config::billable_size_v + ptr->packed_trx.size()) ); + // add_ram_usage( ptr->payer, -(config::billable_size_v + ptr->packed_trx.size()) ); d.modify( *ptr, [&]( auto& gtx ) { gtx.sender = receiver; @@ -303,14 +309,14 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a EOS_ASSERT( control.is_ram_billing_in_notify_allowed() || (receiver == act.account) || (receiver == payer) || privileged, subjective_block_production_exception, "Cannot charge RAM to other accounts during notify." ); - trx_context.add_ram_usage( payer, (config::billable_size_v + trx_size) ); + add_ram_usage( payer, (config::billable_size_v + trx_size) ); } bool apply_context::cancel_deferred_transaction( const uint128_t& sender_id, account_name sender ) { auto& generated_transaction_idx = db.get_mutable_index(); const auto* gto = db.find(boost::make_tuple(sender, sender_id)); if ( gto ) { - trx_context.add_ram_usage( gto->payer, -(config::billable_size_v + gto->packed_trx.size()) ); + add_ram_usage( gto->payer, -(config::billable_size_v + gto->packed_trx.size()) ); generated_transaction_idx.remove(*gto); } return gto; @@ -369,7 +375,7 @@ void apply_context::update_db_usage( const account_name& payer, int64_t delta ) require_authorization( payer ); } } - trx_context.add_ram_usage(payer, delta); + add_ram_usage(payer, delta); } @@ -634,5 +640,14 @@ uint64_t apply_context::next_auth_sequence( account_name actor ) { return rs.auth_sequence; } +void apply_context::add_ram_usage( account_name account, int64_t ram_delta ) { + trx_context.add_ram_usage( account, ram_delta ); + + auto p = _account_ram_deltas.emplace( account, ram_delta ); + if( !p.second ) { + p.first->delta += ram_delta; + } +} + } } /// eosio::chain diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index c5b29397bc4..02207578d98 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -12,6 +12,7 @@ #include #include #include +#include namespace eosio { namespace chain { @@ -402,7 +403,8 @@ namespace eosio { namespace chain { EOS_ASSERT( checker.satisfied( p.first, p.second ), unsatisfied_authorization, "transaction declares authority '${auth}', " "but does not have signatures for it under a provided delay of ${provided_delay} ms, " - "provided permissions ${provided_permissions}, and provided keys ${provided_keys}", + "provided permissions ${provided_permissions}, provided keys ${provided_keys}, " + "and a delay max limit of ${delay_max_limit_ms} ms", ("auth", p.first) ("provided_delay", provided_delay.count()/1000) ("provided_permissions", provided_permissions) @@ -443,7 +445,8 @@ namespace eosio { namespace chain { EOS_ASSERT( checker.satisfied({account, permission}), unsatisfied_authorization, "permission '${auth}' was not satisfied under a provided delay of ${provided_delay} ms, " - "provided permissions ${provided_permissions}, and provided keys ${provided_keys}", + "provided permissions ${provided_permissions}, provided keys ${provided_keys}, " + "and a delay max limit of ${delay_max_limit_ms} ms", ("auth", permission_level{account, permission}) ("provided_delay", provided_delay.count()/1000) ("provided_permissions", provided_permissions) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index ce3a6a47b02..0cf0627fd2b 100644 --- a/libraries/chain/controller.cpp +++ 
b/libraries/chain/controller.cpp @@ -83,6 +83,8 @@ struct pending_state { controller::block_status _block_status = controller::block_status::incomplete; + optional _producer_block_id; + void push() { _db_session.push(); } @@ -106,6 +108,7 @@ struct controller_impl { db_read_mode read_mode = db_read_mode::SPECULATIVE; bool in_trx_requiring_checks = false; ///< if true, checks that are normally skipped on replay (e.g. auth checks) cannot be skipped optional subjective_cpu_leeway; + bool trusted_producer_light_validation = false; typedef pair handler_key; map< account_name, map > apply_handlers; @@ -629,6 +632,9 @@ struct controller_impl { if( gtrx.expiration < self.pending_block_time() ) { trace = std::make_shared(); trace->id = gtrx.trx_id; + trace->block_num = self.pending_block_state()->block_num; + trace->block_time = self.pending_block_time(); + trace->producer_block_id = self.pending_producer_block_id(); trace->scheduled = true; trace->receipt = push_receipt( gtrx.trx_id, transaction_receipt::expired, billed_cpu_time_us, 0 ); // expire the transaction emit( self.accepted_transaction, trx ); @@ -868,7 +874,9 @@ struct controller_impl { } /// push_transaction - void start_block( block_timestamp_type when, uint16_t confirm_block_count, controller::block_status s ) { + void start_block( block_timestamp_type when, uint16_t confirm_block_count, controller::block_status s, + const optional& producer_block_id ) + { EOS_ASSERT( !pending, block_validate_exception, "pending block already exists" ); auto guard_pending = fc::make_scoped_exit([this](){ @@ -885,6 +893,7 @@ struct controller_impl { } pending->_block_status = s; + pending->_producer_block_id = producer_block_id; pending->_pending_block_state = std::make_shared( *head, when ); // promotes pending schedule (if any) to active pending->_pending_block_state->in_current_chain = true; @@ -953,7 +962,8 @@ struct controller_impl { void apply_block( const signed_block_ptr& b, controller::block_status s ) { try { try { EOS_ASSERT( b->block_extensions.size() == 0, block_validate_exception, "no supported extensions" ); - start_block( b->timestamp, b->confirmed, s ); + auto producer_block_id = b->id(); + start_block( b->timestamp, b->confirmed, s , producer_block_id); transaction_trace_ptr trace; @@ -993,9 +1003,9 @@ struct controller_impl { finalize_block(); // this implicitly asserts that all header fields (less the signature) are identical - EOS_ASSERT(b->id() == pending->_pending_block_state->header.id(), + EOS_ASSERT(producer_block_id == pending->_pending_block_state->header.id(), block_validate_exception, "Block ID does not match", - ("producer_block_id",b->id())("validator_block_id",pending->_pending_block_state->header.id())); + ("producer_block_id",producer_block_id)("validator_block_id",pending->_pending_block_state->header.id())); // We need to fill out the pending block state's block because that gets serialized in the reversible block log // in the future we can optimize this by serializing the original and not the copy @@ -1018,14 +1028,20 @@ struct controller_impl { void push_block( const signed_block_ptr& b, controller::block_status s ) { - // idump((fc::json::to_pretty_string(*b))); EOS_ASSERT(!pending, block_validate_exception, "it is not valid to push a block when there is a pending block"); + + auto reset_prod_light_validation = fc::make_scoped_exit([old_value=trusted_producer_light_validation, this]() { + trusted_producer_light_validation = old_value; + }); try { EOS_ASSERT( b, block_validate_exception, "trying to push empty 
block" ); EOS_ASSERT( s != controller::block_status::incomplete, block_validate_exception, "invalid block status for a completed block" ); emit( self.pre_accepted_block, b ); bool trust = !conf.force_all_checks && (s == controller::block_status::irreversible || s == controller::block_status::validated); auto new_header_state = fork_db.add( b, trust ); + if (conf.trusted_producers.count(b->producer)) { + trusted_producer_light_validation = true; + }; emit( self.accepted_block_header, new_header_state ); if ( read_mode != db_read_mode::IRREVERSIBLE ) { @@ -1391,7 +1407,7 @@ fork_database& controller::fork_db()const { return my->fork_db; } void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count) { validate_db_available_size(); - my->start_block(when, confirm_block_count, block_status::incomplete ); + my->start_block(when, confirm_block_count, block_status::incomplete, optional() ); } void controller::finalize_block() { @@ -1523,6 +1539,11 @@ time_point controller::pending_block_time()const { return my->pending->_pending_block_state->header.timestamp; } +optional controller::pending_producer_block_id()const { + EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); + return my->pending->_producer_block_id; +} + uint32_t controller::last_irreversible_block_num() const { return std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum); } @@ -1659,13 +1680,14 @@ bool controller::light_validation_allowed(bool replay_opts_disabled_by_policy) c return false; } - auto pb_status = my->pending->_block_status; + const auto pb_status = my->pending->_block_status; // in a pending irreversible or previously validated block and we have forcing all checks - bool consider_skipping_on_replay = (pb_status == block_status::irreversible || pb_status == block_status::validated) && !replay_opts_disabled_by_policy; + const bool consider_skipping_on_replay = (pb_status == block_status::irreversible || pb_status == block_status::validated) && !replay_opts_disabled_by_policy; // OR in a signed block and in light validation mode - bool consider_skipping_on_validate = (pb_status == block_status::complete && my->conf.block_validation_mode == validation_mode::LIGHT); + const bool consider_skipping_on_validate = (pb_status == block_status::complete && + (my->conf.block_validation_mode == validation_mode::LIGHT || my->trusted_producer_light_validation)); return consider_skipping_on_replay || consider_skipping_on_validate; } diff --git a/libraries/chain/eosio_contract.cpp b/libraries/chain/eosio_contract.cpp index 71d846b38be..33a123981a1 100644 --- a/libraries/chain/eosio_contract.cpp +++ b/libraries/chain/eosio_contract.cpp @@ -121,7 +121,7 @@ void apply_eosio_newaccount(apply_context& context) { ram_delta += owner_permission.auth.get_billable_size(); ram_delta += active_permission.auth.get_billable_size(); - context.trx_context.add_ram_usage(create.name, ram_delta); + context.add_ram_usage(create.name, ram_delta); } FC_CAPTURE_AND_RETHROW( (create) ) } @@ -167,7 +167,7 @@ void apply_eosio_setcode(apply_context& context) { }); if (new_size != old_size) { - context.trx_context.add_ram_usage( act.account, new_size - old_size ); + context.add_ram_usage( act.account, new_size - old_size ); } } @@ -196,7 +196,7 @@ void apply_eosio_setabi(apply_context& context) { }); if (new_size != old_size) { - context.trx_context.add_ram_usage( act.account, new_size - old_size ); + context.add_ram_usage( act.account, new_size - old_size ); } } @@ -254,13 +254,13 @@ void 
apply_eosio_updateauth(apply_context& context) { int64_t new_size = (int64_t)(config::billable_size_v + permission->auth.get_billable_size()); - context.trx_context.add_ram_usage( permission->owner, new_size - old_size ); + context.add_ram_usage( permission->owner, new_size - old_size ); } else { const auto& p = authorization.create_permission( update.account, update.permission, parent_id, update.auth ); int64_t new_size = (int64_t)(config::billable_size_v + p.auth.get_billable_size()); - context.trx_context.add_ram_usage( update.account, new_size ); + context.add_ram_usage( update.account, new_size ); } } @@ -291,7 +291,7 @@ void apply_eosio_deleteauth(apply_context& context) { authorization.remove_permission( permission ); - context.trx_context.add_ram_usage( remove.account, -old_size ); + context.add_ram_usage( remove.account, -old_size ); } @@ -334,7 +334,7 @@ void apply_eosio_linkauth(apply_context& context) { link.required_permission = requirement.requirement; }); - context.trx_context.add_ram_usage( + context.add_ram_usage( l.account, (int64_t)(config::billable_size_v) ); @@ -354,7 +354,7 @@ void apply_eosio_unlinkauth(apply_context& context) { auto link_key = boost::make_tuple(unlink.account, unlink.code, unlink.type); auto link = db.find(link_key); EOS_ASSERT(link != nullptr, action_validate_exception, "Attempting to unlink authority, but no link found"); - context.trx_context.add_ram_usage( + context.add_ram_usage( link->account, -(int64_t)(config::billable_size_v) ); diff --git a/libraries/chain/include/eosio/chain/abi_def.hpp b/libraries/chain/include/eosio/chain/abi_def.hpp index 3782c3f7e5b..f00dd19884d 100644 --- a/libraries/chain/include/eosio/chain/abi_def.hpp +++ b/libraries/chain/include/eosio/chain/abi_def.hpp @@ -94,11 +94,20 @@ struct error_message { string error_msg; }; +struct variant_def { + type_name name; + vector types; +}; + +template +struct may_not_exist { + T value{}; +}; + struct abi_def { abi_def() = default; abi_def(const vector& types, const vector& structs, const vector& actions, const vector& tables, const vector& clauses, const vector& error_msgs) - :version("eosio::abi/1.0") - ,types(types) + :types(types) ,structs(structs) ,actions(actions) ,tables(tables) @@ -106,14 +115,15 @@ struct abi_def { ,error_messages(error_msgs) {} - string version = "eosio::abi/1.0"; - vector types; - vector structs; - vector actions; - vector tables; - vector ricardian_clauses; - vector error_messages; - extensions_type abi_extensions; + string version = ""; + vector types; + vector structs; + vector actions; + vector tables; + vector ricardian_clauses; + vector error_messages; + extensions_type abi_extensions; + may_not_exist> variants; }; abi_def eosio_contract_abi(const abi_def& eosio_system_abi); @@ -121,6 +131,33 @@ vector common_type_defs(); } } /// namespace eosio::chain +namespace fc { + +template +datastream& operator << (datastream& s, const eosio::chain::may_not_exist& v) { + raw::pack(s, v.value); + return s; +} + +template +datastream& operator >> (datastream& s, eosio::chain::may_not_exist& v) { + if (s.remaining()) + raw::unpack(s, v.value); + return s; +} + +template +void to_variant(const eosio::chain::may_not_exist& e, fc::variant& v) { + to_variant( e.value, v); +} + +template +void from_variant(const fc::variant& v, eosio::chain::may_not_exist& e) { + from_variant( v, e.value ); +} + +} // namespace fc + FC_REFLECT( eosio::chain::type_def , (new_type_name)(type) ) FC_REFLECT( eosio::chain::field_def , (name)(type) ) FC_REFLECT( 
diff --git a/libraries/chain/include/eosio/chain/abi_def.hpp b/libraries/chain/include/eosio/chain/abi_def.hpp
index 3782c3f7e5b..f00dd19884d 100644
--- a/libraries/chain/include/eosio/chain/abi_def.hpp
+++ b/libraries/chain/include/eosio/chain/abi_def.hpp
@@ -94,11 +94,20 @@ struct error_message {
    string   error_msg;
 };

+struct variant_def {
+   type_name            name;
+   vector<type_name>    types;
+};
+
+template<typename T>
+struct may_not_exist {
+   T value{};
+};
+
 struct abi_def {
    abi_def() = default;
    abi_def(const vector<type_def>& types, const vector<struct_def>& structs, const vector<action_def>& actions, const vector<table_def>& tables, const vector<clause_pair>& clauses, const vector<error_message>& error_msgs)
-   :version("eosio::abi/1.0")
-   ,types(types)
+   :types(types)
    ,structs(structs)
    ,actions(actions)
    ,tables(tables)
@@ -106,14 +115,15 @@ struct abi_def {
    ,error_messages(error_msgs)
    {}

-   string                version = "eosio::abi/1.0";
-   vector<type_def>      types;
-   vector<struct_def>    structs;
-   vector<action_def>    actions;
-   vector<table_def>     tables;
-   vector<clause_pair>   ricardian_clauses;
-   vector<error_message> error_messages;
-   extensions_type       abi_extensions;
+   string                              version = "";
+   vector<type_def>                    types;
+   vector<struct_def>                  structs;
+   vector<action_def>                  actions;
+   vector<table_def>                   tables;
+   vector<clause_pair>                 ricardian_clauses;
+   vector<error_message>               error_messages;
+   extensions_type                     abi_extensions;
+   may_not_exist<vector<variant_def>>  variants;
 };

 abi_def eosio_contract_abi(const abi_def& eosio_system_abi);
@@ -121,6 +131,33 @@ vector<type_def> common_type_defs();

 } } /// namespace eosio::chain

+namespace fc {
+
+template<typename ST, typename T>
+datastream<ST>& operator << (datastream<ST>& s, const eosio::chain::may_not_exist<T>& v) {
+   raw::pack(s, v.value);
+   return s;
+}
+
+template<typename ST, typename T>
+datastream<ST>& operator >> (datastream<ST>& s, eosio::chain::may_not_exist<T>& v) {
+   if (s.remaining())
+      raw::unpack(s, v.value);
+   return s;
+}
+
+template<typename T>
+void to_variant(const eosio::chain::may_not_exist<T>& e, fc::variant& v) {
+   to_variant( e.value, v );
+}
+
+template<typename T>
+void from_variant(const fc::variant& v, eosio::chain::may_not_exist<T>& e) {
+   from_variant( v, e.value );
+}
+
+} // namespace fc
+
 FC_REFLECT( eosio::chain::type_def                         , (new_type_name)(type) )
 FC_REFLECT( eosio::chain::field_def                        , (name)(type) )
 FC_REFLECT( eosio::chain::struct_def                       , (name)(base)(fields) )
@@ -128,5 +165,6 @@ FC_REFLECT( eosio::chain::action_def                       , (name)(type)(ricard
 FC_REFLECT( eosio::chain::table_def                        , (name)(index_type)(key_names)(key_types)(type) )
 FC_REFLECT( eosio::chain::clause_pair                      , (id)(body) )
 FC_REFLECT( eosio::chain::error_message                    , (error_code)(error_msg) )
+FC_REFLECT( eosio::chain::variant_def                      , (name)(types) )
 FC_REFLECT( eosio::chain::abi_def, (version)(types)(structs)(actions)(tables)
-            (ricardian_clauses)(error_messages)(abi_extensions) )
+            (ricardian_clauses)(error_messages)(abi_extensions)(variants) )
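The `may_not_exist<T>` wrapper makes `variants` a binary extension: `operator<<` always packs it, but `operator>>` only unpacks it when bytes remain, so ABIs serialized before the field existed still deserialize. A hedged round-trip sketch, assuming the fc library and the header above:

```cpp
// Illustrative only: bytes packed by a pre-`variants` serializer still unpack,
// because the trailing field is read only while the stream has bytes left.
#include <eosio/chain/abi_def.hpp>
#include <fc/io/raw.hpp>

eosio::chain::abi_def unpack_possibly_old_abi( const std::vector<char>& bytes ) {
   fc::datastream<const char*> ds( bytes.data(), bytes.size() );
   eosio::chain::abi_def abi;
   fc::raw::unpack( ds, abi );   // if `bytes` predate variants, the stream is
   return abi;                   // exhausted first and abi.variants.value stays empty
}
```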
diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp
index d53455f2a79..a8e9ba3e520 100644
--- a/libraries/chain/include/eosio/chain/abi_serializer.hpp
+++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp
@@ -53,14 +53,14 @@ struct abi_serializer {
       return _binary_to_variant(type, binary, 0, fc::time_point::now() + max_serialization_time, max_serialization_time);
    }
    bytes variant_to_binary(const type_name& type, const fc::variant& var, const fc::microseconds& max_serialization_time)const {
-      return _variant_to_binary(type, var, 0, fc::time_point::now() + max_serialization_time, max_serialization_time);
+      return _variant_to_binary(type, var, true, 0, fc::time_point::now() + max_serialization_time, max_serialization_time);
    }

    fc::variant binary_to_variant(const type_name& type, fc::datastream<const char*>& binary, const fc::microseconds& max_serialization_time)const {
       return _binary_to_variant(type, binary, 0, fc::time_point::now() + max_serialization_time, max_serialization_time);
    }

    void variant_to_binary(const type_name& type, const fc::variant& var, fc::datastream<char*>& ds, const fc::microseconds& max_serialization_time)const {
-      _variant_to_binary(type, var, ds, 0, fc::time_point::now() + max_serialization_time, max_serialization_time);
+      _variant_to_binary(type, var, ds, true, 0, fc::time_point::now() + max_serialization_time, max_serialization_time);
    }

@@ -95,28 +95,30 @@ struct abi_serializer {

    private:

-   map<type_name, type_name>     typedefs;
-   map<type_name, struct_def>    structs;
-   map<name, type_name>          actions;
-   map<name, type_name>          tables;
-   map<uint64_t, string>         error_messages;
+   map<type_name, type_name>      typedefs;
+   map<type_name, struct_def>     structs;
+   map<name, type_name>           actions;
+   map<name, type_name>           tables;
+   map<uint64_t, string>          error_messages;
+   map<type_name, variant_def>    variants;

    map<type_name, pair<unpack_function, pack_function>> built_in_types;
    void configure_built_in_types();

    fc::variant _binary_to_variant(const type_name& type, const bytes& binary,
                                   size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const;
-   bytes       _variant_to_binary(const type_name& type, const fc::variant& var,
+   bytes       _variant_to_binary(const type_name& type, const fc::variant& var, bool allow_extensions,
                                   size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const;

    fc::variant _binary_to_variant(const type_name& type, fc::datastream<const char*>& binary,
                                   size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const;
-   void        _variant_to_binary(const type_name& type, const fc::variant& var, fc::datastream<char*>& ds,
+   void        _variant_to_binary(const type_name& type, const fc::variant& var, fc::datastream<char*>& ds, bool allow_extensions,
                                   size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const;

    void _binary_to_variant(const type_name& type, fc::datastream<const char*>& stream, fc::mutable_variant_object& obj,
                            size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const;

+   static type_name _remove_bin_extension(const type_name& type);
    bool _is_type(const type_name& type, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const;

    void validate(const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const;
@@ -470,7 +472,7 @@ namespace impl {
          if (abi.valid()) {
             auto type = abi->get_action_type(act.name);
             if (!type.empty()) {
-               act.data = std::move( abi->_variant_to_binary( type, data, recursion_depth, deadline, max_serialization_time ));
+               act.data = std::move( abi->_variant_to_binary( type, data, true, recursion_depth, deadline, max_serialization_time ));
                valid_empty_data = act.data.empty();
             }
          }
diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp
index ef44ca7e0df..8a4f98a7caa 100644
--- a/libraries/chain/include/eosio/chain/apply_context.hpp
+++ b/libraries/chain/include/eosio/chain/apply_context.hpp
@@ -572,6 +572,8 @@ class apply_context {
       uint64_t next_recv_sequence( account_name receiver );
       uint64_t next_auth_sequence( account_name actor );

+      void add_ram_usage( account_name account, int64_t ram_delta );
+
    private:

       void validate_referenced_accounts( const transaction& t )const;
@@ -607,6 +609,7 @@ class apply_context {
       vector<action>            _inline_actions; ///< queued inline messages
       vector<action>            _cfa_inline_actions; ///< queued inline messages
       std::ostringstream        _pending_console_output;
+      flat_set<account_delta>   _account_ram_deltas; ///< flat_set of account_delta so json is an array of objects

       //bytes                  _cached_trx;
 };
diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp
index 539072ad785..b0fafdc5ae7 100644
--- a/libraries/chain/include/eosio/chain/controller.hpp
+++ b/libraries/chain/include/eosio/chain/controller.hpp
@@ -74,6 +74,7 @@ namespace eosio { namespace chain {

          validation_mode          block_validation_mode = validation_mode::FULL;
          flat_set<account_name>   resource_greylist;
+         flat_set<account_name>   trusted_producers;
       };

       enum class block_status {
@@ -183,8 +184,9 @@ namespace eosio { namespace chain {
          time_point       fork_db_head_block_time()const;
          account_name     fork_db_head_block_producer()const;

-         time_point      pending_block_time()const;
-         block_state_ptr pending_block_state()const;
+         time_point                pending_block_time()const;
+         block_state_ptr           pending_block_state()const;
+         optional<block_id_type>   pending_producer_block_id()const;

          const producer_schedule_type&    active_producers()const;
          const producer_schedule_type&    pending_producers()const;
@@ -306,4 +308,5 @@ FC_REFLECT( eosio::chain::controller::config,
             (genesis)
             (wasm_runtime)
             (resource_greylist)
+            (trusted_producers)
           )
diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp
index fd49e930af5..91467e746f6 100644
--- a/libraries/chain/include/eosio/chain/exceptions.hpp
+++ b/libraries/chain/include/eosio/chain/exceptions.hpp
@@ -416,6 +416,10 @@ namespace eosio { namespace chain {
                                     3015013, "Unpack data exception" )
    FC_DECLARE_DERIVED_EXCEPTION( pack_exception, abi_exception,
                                  3015014, "Pack data exception" )
+   FC_DECLARE_DERIVED_EXCEPTION( duplicate_abi_variant_def_exception, abi_exception,
+                                 3015015, "Duplicate variant definition in the ABI" )
+   FC_DECLARE_DERIVED_EXCEPTION( unsupported_abi_version_exception, abi_exception,
+                                 3015016, "ABI has an unsupported version" )

    FC_DECLARE_DERIVED_EXCEPTION( contract_exception, chain_exception,
                                  3160000, "Contract exception" )
diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp
index 41fb8f079a6..ad02baf5bac 100644
--- a/libraries/chain/include/eosio/chain/trace.hpp
+++ b/libraries/chain/include/eosio/chain/trace.hpp
@@ -10,18 +10,33 @@

 namespace eosio { namespace chain {

+   struct account_delta {
+      account_delta( const account_name& n, int64_t d):account(n),delta(d){}
+      account_delta(){}
+
+      account_name account;
+      int64_t delta = 0;
+
+      friend bool operator<( const account_delta& lhs, const account_delta& rhs ) { return lhs.account < rhs.account; }
+   };
+
    struct base_action_trace {
       base_action_trace( const action_receipt& r ):receipt(r){}
       base_action_trace(){}

      action_receipt       receipt;
      action               act;
+      bool                 context_free = false;
      fc::microseconds     elapsed;
      uint64_t             cpu_usage = 0;
      string               console;

      uint64_t             total_cpu_usage = 0; /// total of inline_traces[x].cpu_usage + cpu_usage
      transaction_id_type  trx_id; ///< the transaction that generated this action
+      uint32_t             block_num = 0;
+      block_timestamp_type block_time;
+      fc::optional<block_id_type>   producer_block_id;
+      flat_set<account_delta>       account_ram_deltas;
    };

    struct action_trace : public base_action_trace {
@@ -35,6 +50,9 @@ namespace eosio { namespace chain {

    struct transaction_trace {
      transaction_id_type                        id;
+      uint32_t                                   block_num = 0;
+      block_timestamp_type                       block_time;
+      fc::optional<block_id_type>                producer_block_id;
      fc::optional<transaction_receipt_header>   receipt;
      fc::microseconds                           elapsed;
      uint64_t                                   net_usage = 0;
@@ -48,11 +66,16 @@ namespace eosio { namespace chain {

 } }  /// namespace eosio::chain

+FC_REFLECT( eosio::chain::account_delta,
+            (account)(delta) )
+
 FC_REFLECT( eosio::chain::base_action_trace,
-            (receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id) )
+            (receipt)(act)(context_free)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id)
+            (block_num)(block_time)(producer_block_id)(account_ram_deltas) )

 FC_REFLECT_DERIVED( eosio::chain::action_trace,
                     (eosio::chain::base_action_trace), (inline_traces) )

-FC_REFLECT( eosio::chain::transaction_trace, (id)(receipt)(elapsed)(net_usage)(scheduled)
+FC_REFLECT( eosio::chain::transaction_trace, (id)(block_num)(block_time)(producer_block_id)
+                                             (receipt)(elapsed)(net_usage)(scheduled)
                                              (action_traces)(failed_dtrx_trace)(except) )
diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp
index 22e8eae36d6..3175994dedd 100644
--- a/libraries/chain/include/eosio/chain/transaction_context.hpp
+++ b/libraries/chain/include/eosio/chain/transaction_context.hpp
@@ -38,8 +38,6 @@ namespace eosio { namespace chain {
          void pause_billing_timer();
          void resume_billing_timer();

-         void add_ram_usage( account_name account, int64_t ram_delta );
-
          uint32_t update_billed_cpu_time( fc::time_point now );

          std::tuple<int64_t, int64_t, bool, bool> max_bandwidth_billed_accounts_can_pay( bool force_elastic_limits = false )const;
@@ -49,6 +47,8 @@ namespace eosio { namespace chain {
          friend struct controller_impl;
          friend class apply_context;

+         void add_ram_usage( account_name account, int64_t ram_delta );
+
          void dispatch_action( action_trace& trace, const action& a, account_name receiver, bool context_free = false, uint32_t recurse_depth = 0 );
          inline void dispatch_action( action_trace& trace, const action& a, bool context_free = false ) {
             dispatch_action(trace, a, a.account, context_free);
diff --git a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp
index 82bd93233e6..17ac03fddfe 100644
--- a/libraries/chain/include/eosio/chain/wasm_interface.hpp
+++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp
@@ -54,6 +54,7 @@ namespace eosio { namespace chain {
          enum class vm_type {
             wavm,
             binaryen,
+            wabt
          };

          wasm_interface(vm_type vm);
@@ -76,4 +77,4 @@ namespace eosio{ namespace chain {
 std::istream& operator>>(std::istream& in, wasm_interface::vm_type& runtime);

 }}

-FC_REFLECT_ENUM( eosio::chain::wasm_interface::vm_type, (wavm)(binaryen) )
+FC_REFLECT_ENUM( eosio::chain::wasm_interface::vm_type, (wavm)(binaryen)(wabt) )
diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp
index 496cf3b435b..df28d79a21b 100644
--- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp
+++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp
@@ -3,6 +3,7 @@
 #include
 #include
 #include
+#include <eosio/chain/webassembly/wabt.hpp>
 #include
 #include
 #include
@@ -28,6 +29,8 @@ namespace eosio { namespace chain {
             runtime_interface = std::make_unique<webassembly::wavm::wavm_runtime>();
          else if(vm == wasm_interface::vm_type::binaryen)
             runtime_interface = std::make_unique<webassembly::binaryen::binaryen_runtime>();
+         else if(vm == wasm_interface::vm_type::wabt)
+            runtime_interface = std::make_unique<webassembly::wabt_runtime::wabt_runtime>();
          else
             EOS_THROW(wasm_exception, "wasm_interface_impl fall through");
       }
@@ -95,7 +98,8 @@ namespace eosio { namespace chain {

 #define _REGISTER_INTRINSIC_EXPLICIT(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\
    _REGISTER_WAVM_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\
-   _REGISTER_BINARYEN_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)
+   _REGISTER_BINARYEN_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\
+   _REGISTER_WABT_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)

 #define _REGISTER_INTRINSIC4(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\
    _REGISTER_INTRINSIC_EXPLICIT(CLS, MOD, METHOD, WASM_SIG, NAME, SIG )
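A quick illustration of the new enum value: "wabt" now parses from configuration text the same way "wavm" and "binaryen" do, via the stream operator declared above (and extended in wasm_interface.cpp later in this diff).

```cpp
// Hedged sketch; relies only on the operator>> declared in wasm_interface.hpp.
#include <eosio/chain/wasm_interface.hpp>
#include <sstream>
#include <string>

eosio::chain::wasm_interface::vm_type parse_runtime( const std::string& s ) {
   std::istringstream in( s );
   eosio::chain::wasm_interface::vm_type rt;
   in >> rt;   // accepts "wavm", "binaryen", and (with this patch) "wabt"
   return rt;
}
```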
diff --git a/libraries/chain/include/eosio/chain/webassembly/wabt.hpp b/libraries/chain/include/eosio/chain/webassembly/wabt.hpp
new file mode 100644
index 00000000000..5be568d4b01
--- /dev/null
+++ b/libraries/chain/include/eosio/chain/webassembly/wabt.hpp
@@ -0,0 +1,712 @@
+#pragma once
+
+#include <eosio/chain/webassembly/common.hpp>
+#include <eosio/chain/webassembly/runtime_interface.hpp>
+#include <eosio/chain/exceptions.hpp>
+#include <eosio/chain/apply_context.hpp>
+#include <softfloat.hpp>
+
+//wabt includes
+#include <src/binary-reader.h>
+#include <src/common.h>
+#include <src/interp.h>
+
+namespace eosio { namespace chain { namespace webassembly { namespace wabt_runtime {
+
+using namespace fc;
+using namespace wabt;
+using namespace wabt::interp;
+using namespace eosio::chain::webassembly::common;
+
+struct wabt_apply_instance_vars {
+   Memory* memory;
+   apply_context& ctx;
+
+   char* get_validated_pointer(uint32_t offset, uint32_t size) {
+      EOS_ASSERT(memory, wasm_execution_error, "access violation");
+      EOS_ASSERT(offset + size <= memory->data.size() && offset + size >= offset, wasm_execution_error, "access violation");
+      return memory->data.data() + offset;
+   }
+};
+
+struct intrinsic_registrator {
+   using intrinsic_fn = TypedValue(*)(wabt_apply_instance_vars&, const TypedValues&);
+
+   struct intrinsic_func_info {
+      FuncSignature sig;
+      intrinsic_fn func;
+   };
+
+   static auto& get_map(){
+      static map<string, map<string, intrinsic_func_info>> _map;
+      return _map;
+   };
+
+   intrinsic_registrator(const char* mod, const char* name, const FuncSignature& sig, intrinsic_fn fn) {
+      get_map()[string(mod)][string(name)] = intrinsic_func_info{sig, fn};
+   }
+};
+
+class wabt_runtime : public eosio::chain::wasm_runtime_interface {
+   public:
+      wabt_runtime();
+      std::unique_ptr<wasm_instantiated_module_interface> instantiate_module(const char* code_bytes, size_t code_size, std::vector<uint8_t> initial_memory) override;
+
+   private:
+      wabt::ReadBinaryOptions read_binary_options;   //note default ctor will look at each option in feature.def and default to DISABLED for the feature
+};
+
+/**
+ * class to represent an in-wasm-memory array
+ * it is a hint to the transcriber that the next parameter will
+ * be a size (data bytes length) and that the pair are validated together
+ * This triggers the template specialization of intrinsic_invoker_impl
+ * @tparam T
+ */
+template<typename T>
+inline array_ptr<T> array_ptr_impl (wabt_apply_instance_vars& vars, uint32_t ptr, uint32_t length)
+{
+   EOS_ASSERT( length < INT_MAX/(uint32_t)sizeof(T), binaryen_exception, "length will overflow" );
+   return array_ptr<T>((T*)(vars.get_validated_pointer(ptr, length * (uint32_t)sizeof(T))));
+}
+
+/**
+ * class to represent an in-wasm-memory char array that must be null terminated
+ */
+inline null_terminated_ptr null_terminated_ptr_impl(wabt_apply_instance_vars& vars, uint32_t ptr)
+{
+   char *value = vars.get_validated_pointer(ptr, 1);
+   const char* p = value;
+   const char* const top_of_memory = vars.memory->data.data() + vars.memory->data.size();
+   while(p < top_of_memory)
+      if(*p++ == '\0')
+         return null_terminated_ptr(value);
+
+   FC_THROW_EXCEPTION(wasm_execution_error, "unterminated string");
+}
+
+
+template<typename T>
+struct is_reference_from_value {
+   static constexpr bool value = false;
+};
+
+template<>
+struct is_reference_from_value<name> {
+   static constexpr bool value = true;
+};
+
+template<>
+struct is_reference_from_value<fc::time_point_sec> {
+   static constexpr bool value = true;
+};
+
+template<typename T>
+constexpr bool is_reference_from_value_v = is_reference_from_value<T>::value;
+
+template<typename T>
+T convert_literal_to_native(const TypedValue& v);
+
+template<>
+inline double convert_literal_to_native<double>(const TypedValue& v) {
+   return v.get_f64();
+}
+
+template<>
+inline float convert_literal_to_native<float>(const TypedValue& v) {
+   return v.get_f32();
+}
+
+template<>
+inline int64_t convert_literal_to_native<int64_t>(const TypedValue& v) {
+   return v.get_i64();
+}
+
+template<>
+inline uint64_t convert_literal_to_native<uint64_t>(const TypedValue& v) {
+   return v.get_i64();
+}
+
+template<>
+inline int32_t convert_literal_to_native<int32_t>(const TypedValue& v) {
+   return v.get_i32();
+}
+
+template<>
+inline uint32_t convert_literal_to_native<uint32_t>(const TypedValue& v) {
+   return v.get_i32();
+}
+
+template<>
+inline bool convert_literal_to_native<bool>(const TypedValue& v) {
+   return v.get_i32();
+}
+
+template<>
+inline name convert_literal_to_native<name>(const TypedValue& v) {
+   int64_t val = v.get_i64();
+   return name(val);
+}
+
+inline auto convert_native_to_literal(const wabt_apply_instance_vars&, const uint32_t &val) {
+   TypedValue tv(Type::I32);
+   tv.set_i32(val);
+   return tv;
+}
+
+inline auto convert_native_to_literal(const wabt_apply_instance_vars&, const int32_t &val) {
+   TypedValue tv(Type::I32);
+   tv.set_i32(val);
+   return tv;
+}
+
+inline auto convert_native_to_literal(const wabt_apply_instance_vars&, const uint64_t &val) {
+   TypedValue tv(Type::I64);
+   tv.set_i64(val);
+   return tv;
+}
+
+inline auto convert_native_to_literal(const wabt_apply_instance_vars&, const int64_t &val) {
+   TypedValue tv(Type::I64);
+   tv.set_i64(val);
+   return tv;
+}
+
+inline auto convert_native_to_literal(const wabt_apply_instance_vars&, const float &val) {
+   TypedValue tv(Type::F32);
+   tv.set_f32(val);
+   return tv;
+}
+
+inline auto convert_native_to_literal(const wabt_apply_instance_vars&, const double &val) {
+   TypedValue tv(Type::F64);
+   tv.set_f64(val);
+   return tv;
+}
+
+inline auto convert_native_to_literal(const wabt_apply_instance_vars&, const name &val) {
+   TypedValue tv(Type::I64);
+   tv.set_i64(val.value);
+   return tv;
+}
+inline auto convert_native_to_literal(const wabt_apply_instance_vars& vars, char* ptr) {
+   const char* base = vars.memory->data.data();
+   const char* top_of_memory = base + vars.memory->data.size();
+   EOS_ASSERT(ptr >= base && ptr < top_of_memory, wasm_execution_error, "returning pointer not in linear memory");
+   Value v;
+   v.i32 = (int)(ptr - base);
+   return TypedValue(Type::I32, v);
+}
+
+struct void_type {
+};
+
+template<typename T>
+struct wabt_to_value_type;
+
+template<>
+struct wabt_to_value_type<F32> {
+   static constexpr auto value = Type::F32;
+};
+
+template<>
+struct wabt_to_value_type<F64> {
+   static constexpr auto value = Type::F64;
+};
+template<>
+struct wabt_to_value_type<I32> {
+   static constexpr auto value = Type::I32;
+};
+template<>
+struct wabt_to_value_type<I64> {
+   static constexpr auto value = Type::I64;
+};
+
+template<typename T>
+constexpr auto wabt_to_value_type_v = wabt_to_value_type<T>::value;
+
+template<typename T>
+struct wabt_to_rvalue_type;
+template<>
+struct wabt_to_rvalue_type<F32> {
+   static constexpr auto value = Type::F32;
+};
+template<>
+struct wabt_to_rvalue_type<F64> {
+   static constexpr auto value = Type::F64;
+};
+template<>
+struct wabt_to_rvalue_type<I32> {
+   static constexpr auto value = Type::I32;
+};
+template<>
+struct wabt_to_rvalue_type<I64> {
+   static constexpr auto value = Type::I64;
+};
+template<>
+struct wabt_to_rvalue_type<const name&> {
+   static constexpr auto value = Type::I64;
+};
+template<>
+struct wabt_to_rvalue_type<name> {
+   static constexpr auto value = Type::I64;
+};
+
+template<>
+struct wabt_to_rvalue_type<char*> {
+   static constexpr auto value = Type::I32;
+};
+
+template<typename T>
+constexpr auto wabt_to_rvalue_type_v = wabt_to_rvalue_type<T>::value;
+
+template<typename T>
+struct wabt_function_type_provider;
+
+template<typename Ret, typename ...Args>
+struct wabt_function_type_provider<Ret(Args...)> {
+   static FuncSignature type() {
+      return FuncSignature({wabt_to_value_type_v<Args> ...}, {wabt_to_rvalue_type_v<Ret>});
+   }
+};
+template<typename ...Args>
+struct wabt_function_type_provider<void(Args...)> {
+   static FuncSignature type() {
+      return FuncSignature({wabt_to_value_type_v<Args> ...}, {});
+   }
+};
+
+/**
+ * Forward declaration of the invoker type which transcribes arguments to/from a native method
+ * and injects the appropriate checks
+ *
+ * @tparam Ret - the return type of the native function
+ * @tparam NativeParameters - a std::tuple of the remaining native parameters to transcribe
+ * @tparam WasmParameters - a std::tuple of the transcribed parameters
+ */
+template<typename Ret, typename NativeParameters>
+struct intrinsic_invoker_impl;
+
+/**
+ * Specialization for the fully transcribed signature
+ * @tparam Ret - the return type of the native function
+ */
+template<typename Ret>
+struct intrinsic_invoker_impl<Ret, std::tuple<>> {
+   using next_method_type        = Ret (*)(wabt_apply_instance_vars&, const TypedValues&, int);
+
+   template<next_method_type Method>
+   static TypedValue invoke(wabt_apply_instance_vars& vars, const TypedValues& args) {
+      return convert_native_to_literal(vars, Method(vars, args, args.size() - 1));
+   }
+
+   template<next_method_type Method>
+   static const auto fn() {
+      return invoke<Method>;
+   }
+};
+
+/**
+ * specialization of the fully transcribed signature for void return values
+ * @tparam Translated - the arguments to the wasm function
+ */
+template<>
+struct intrinsic_invoker_impl<void_type, std::tuple<>> {
+   using next_method_type        = void_type (*)(wabt_apply_instance_vars&, const TypedValues&, int);
+
+   template<next_method_type Method>
+   static TypedValue invoke(wabt_apply_instance_vars& vars, const TypedValues& args) {
+      Method(vars, args, args.size() - 1);
+      return TypedValue(Type::Void);
+   }
+
+   template<next_method_type Method>
+   static const auto fn() {
+      return invoke<Method>;
+   }
+};
+/**
+ * Specialization for transcribing a simple type in the native method signature
+ * @tparam Ret - the return type of the native method
+ * @tparam Input - the type of the native parameter to transcribe
+ * @tparam Inputs - the remaining native parameters to transcribe
+ */
+template<typename Ret, typename Input, typename... Inputs>
+struct intrinsic_invoker_impl<Ret, std::tuple<Input, Inputs...>> {
+   using next_step = intrinsic_invoker_impl<Ret, std::tuple<Inputs...>>;
+   using then_type = Ret (*)(wabt_apply_instance_vars&, Input, Inputs..., const TypedValues&, int);
+
+   template<then_type Then>
+   static Ret translate_one(wabt_apply_instance_vars& vars, Inputs... rest, const TypedValues& args, int offset) {
+      auto& last = args.at(offset);
+      auto native = convert_literal_to_native<Input>(last);
+      return Then(vars, native, rest..., args, (uint32_t)offset - 1);
+   };
+
+   template<then_type Then>
+   static const auto fn() {
+      return next_step::template fn<translate_one<Then>>();
+   }
+};
+
+/**
+ * Specialization for transcribing an array_ptr type in the native method signature
+ * This type transcribes into 2 wasm parameters: a pointer and byte length and checks the validity of that memory
+ * range before dispatching to the native method
+ *
+ * @tparam Ret - the return type of the native method
+ * @tparam Inputs - the remaining native parameters to transcribe
+ */
+template<typename T, typename Ret, typename... Inputs>
+struct intrinsic_invoker_impl<Ret, std::tuple<array_ptr<T>, size_t, Inputs...>> {
+   using next_step = intrinsic_invoker_impl<Ret, std::tuple<Inputs...>>;
+   using then_type = Ret(*)(wabt_apply_instance_vars&, array_ptr<T>, size_t, Inputs..., const TypedValues&, int);
+
+   template<then_type Then, typename U=T>
+   static auto translate_one(wabt_apply_instance_vars& vars, Inputs... rest, const TypedValues& args, int offset) -> std::enable_if_t<std::is_const<U>::value, Ret> {
+      static_assert(!std::is_pointer<U>::value, "Currently don't support array of pointers");
+      uint32_t ptr = args.at((uint32_t)offset - 1).get_i32();
+      size_t length = args.at((uint32_t)offset).get_i32();
+      T* base = array_ptr_impl<T>(vars, ptr, length);
+      if ( reinterpret_cast<uintptr_t>(base) % alignof(T) != 0 ) {
+         wlog( "misaligned array of const values" );
+         std::vector<std::remove_const_t<T> > copy(length > 0 ? length : 1);
+         T* copy_ptr = &copy[0];
+         memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) );
+         return Then(vars, static_cast<array_ptr<T>>(copy_ptr), length, rest..., args, (uint32_t)offset - 2);
+      }
+      return Then(vars, static_cast<array_ptr<T>>(base), length, rest..., args, (uint32_t)offset - 2);
+   };
+
+   template<then_type Then, typename U=T>
+   static auto translate_one(wabt_apply_instance_vars& vars, Inputs... rest, const TypedValues& args, int offset) -> std::enable_if_t<!std::is_const<U>::value, Ret> {
+      static_assert(!std::is_pointer<U>::value, "Currently don't support array of pointers");
+      uint32_t ptr = args.at((uint32_t)offset - 1).get_i32();
+      size_t length = args.at((uint32_t)offset).get_i32();
+      T* base = array_ptr_impl<T>(vars, ptr, length);
+      if ( reinterpret_cast<uintptr_t>(base) % alignof(T) != 0 ) {
+         wlog( "misaligned array of values" );
+         std::vector<std::remove_const_t<T> > copy(length > 0 ? length : 1);
+         T* copy_ptr = &copy[0];
+         memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) );
+         Ret ret = Then(vars, static_cast<array_ptr<T>>(copy_ptr), length, rest..., args, (uint32_t)offset - 2);
+         memcpy( (void*)base, (void*)copy_ptr, length * sizeof(T) );
+         return ret;
+      }
+      return Then(vars, static_cast<array_ptr<T>>(base), length, rest..., args, (uint32_t)offset - 2);
+   };
+
+   template<then_type Then>
+   static const auto fn() {
+      return next_step::template fn<translate_one<Then>>();
+   }
+};
+/**
+ * Specialization for transcribing a null_terminated_ptr type in the native method signature
+ * This type transcribes into 1 wasm parameter: a char pointer which is validated to contain
+ * a null value before the end of the allocated memory.
+ *
+ * @tparam Ret - the return type of the native method
+ * @tparam Inputs - the remaining native parameters to transcribe
+ */
+template<typename Ret, typename... Inputs>
+struct intrinsic_invoker_impl<Ret, std::tuple<null_terminated_ptr, Inputs...>> {
+   using next_step = intrinsic_invoker_impl<Ret, std::tuple<Inputs...>>;
+   using then_type = Ret(*)(wabt_apply_instance_vars&, null_terminated_ptr, Inputs..., const TypedValues&, int);
+
+   template<then_type Then>
+   static Ret translate_one(wabt_apply_instance_vars& vars, Inputs... rest, const TypedValues& args, int offset) {
+      uint32_t ptr = args.at((uint32_t)offset).get_i32();
+      return Then(vars, null_terminated_ptr_impl(vars, ptr), rest..., args, (uint32_t)offset - 1);
+   };
+
+   template<then_type Then>
+   static const auto fn() {
+      return next_step::template fn<translate_one<Then>>();
+   }
+};
+
+/**
+ * Specialization for transcribing a pair of array_ptr types in the native method signature that share size
+ * This type transcribes into 3 wasm parameters: 2 pointers and byte length and checks the validity of those memory
+ * ranges before dispatching to the native method
+ *
+ * @tparam Ret - the return type of the native method
+ * @tparam Inputs - the remaining native parameters to transcribe
+ */
+template<typename T, typename U, typename Ret, typename... Inputs>
+struct intrinsic_invoker_impl<Ret, std::tuple<array_ptr<T>, array_ptr<U>, size_t, Inputs...>> {
+   using next_step = intrinsic_invoker_impl<Ret, std::tuple<Inputs...>>;
+   using then_type = Ret(*)(wabt_apply_instance_vars&, array_ptr<T>, array_ptr<U>, size_t, Inputs..., const TypedValues&, int);
+
+   template<then_type Then>
+   static Ret translate_one(wabt_apply_instance_vars& vars, Inputs... rest, const TypedValues& args, int offset) {
+      uint32_t ptr_t = args.at((uint32_t)offset - 2).get_i32();
+      uint32_t ptr_u = args.at((uint32_t)offset - 1).get_i32();
+      size_t length = args.at((uint32_t)offset).get_i32();
+      static_assert(std::is_same<std::remove_const_t<T>, char>::value && std::is_same<std::remove_const_t<U>, char>::value, "Currently only support array of (const)chars");
+      return Then(vars, array_ptr_impl<T>(vars, ptr_t, length), array_ptr_impl<U>(vars, ptr_u, length), length, args, (uint32_t)offset - 3);
+   };
+
+   template<then_type Then>
+   static const auto fn() {
+      return next_step::template fn<translate_one<Then>>();
+   }
+};
+
+/**
+ * Specialization for transcribing memset parameters
+ *
+ * @tparam Ret - the return type of the native method
+ * @tparam Inputs - the remaining native parameters to transcribe
+ */
+template<typename Ret>
+struct intrinsic_invoker_impl<Ret, std::tuple<array_ptr<char>, int, size_t>> {
+   using next_step = intrinsic_invoker_impl<Ret, std::tuple<>>;
+   using then_type = Ret(*)(wabt_apply_instance_vars&, array_ptr<char>, int, size_t, const TypedValues&, int);
+
+   template<then_type Then>
+   static Ret translate_one(wabt_apply_instance_vars& vars, const TypedValues& args, int offset) {
+      uint32_t ptr = args.at((uint32_t)offset - 2).get_i32();
+      uint32_t value = args.at((uint32_t)offset - 1).get_i32();
+      size_t length = args.at((uint32_t)offset).get_i32();
+      return Then(vars, array_ptr_impl<char>(vars, ptr, length), value, length, args, (uint32_t)offset - 3);
+   };
+
+   template<then_type Then>
+   static const auto fn() {
+      return next_step::template fn<translate_one<Then>>();
+   }
+};
+
+/**
+ * Specialization for transcribing a pointer type in the native method signature
+ * This type transcribes into an int32 pointer and checks the validity of that memory
+ * range before dispatching to the native method
+ *
+ * @tparam Ret - the return type of the native method
+ * @tparam Inputs - the remaining native parameters to transcribe
+ */
+template<typename T, typename Ret, typename... Inputs>
+struct intrinsic_invoker_impl<Ret, std::tuple<T *, Inputs...>> {
+   using next_step = intrinsic_invoker_impl<Ret, std::tuple<Inputs...>>;
+   using then_type = Ret (*)(wabt_apply_instance_vars&, T *, Inputs..., const TypedValues&, int);
+   template<then_type Then, typename U=T>
+   static auto translate_one(wabt_apply_instance_vars& vars, Inputs... rest, const TypedValues& args, int offset) -> std::enable_if_t<std::is_const<U>::value, Ret> {
+      uint32_t ptr = args.at((uint32_t)offset).get_i32();
+      T* base = array_ptr_impl<T>(vars, ptr, 1);
+      if ( reinterpret_cast<uintptr_t>(base) % alignof(T) != 0 ) {
+         wlog( "misaligned const pointer" );
+         std::remove_const_t<T> copy;
+         T* copy_ptr = &copy;
+         memcpy( (void*)copy_ptr, (void*)base, sizeof(T) );
+         return Then(vars, copy_ptr, rest..., args, (uint32_t)offset - 1);
+      }
+      return Then(vars, base, rest..., args, (uint32_t)offset - 1);
+   };
+
+   template<then_type Then, typename U=T>
+   static auto translate_one(wabt_apply_instance_vars& vars, Inputs... rest, const TypedValues& args, int offset) -> std::enable_if_t<!std::is_const<U>::value, Ret> {
+      uint32_t ptr = args.at((uint32_t)offset).get_i32();
+      T* base = array_ptr_impl<T>(vars, ptr, 1);
+      if ( reinterpret_cast<uintptr_t>(base) % alignof(T) != 0 ) {
+         wlog( "misaligned pointer" );
+         T copy;
+         memcpy( (void*)&copy, (void*)base, sizeof(T) );
+         Ret ret = Then(vars, &copy, rest..., args, (uint32_t)offset - 1);
+         memcpy( (void*)base, (void*)&copy, sizeof(T) );
+         return ret;
+      }
+      return Then(vars, base, rest..., args, (uint32_t)offset - 1);
+   };
+
+   template<then_type Then>
+   static const auto fn() {
+      return next_step::template fn<translate_one<Then>>();
+   }
+};
+
+/**
+ * Specialization for transcribing a reference to a name which can be passed as a native value
+ * This type transcribes into a native type which is loaded by value into a
+ * variable on the stack and then passed by reference to the intrinsic.
+ *
+ * @tparam Ret - the return type of the native method
+ * @tparam Inputs - the remaining native parameters to transcribe
+ */
+template<typename Ret, typename... Inputs>
+struct intrinsic_invoker_impl<Ret, std::tuple<const name&, Inputs...>> {
+   using next_step = intrinsic_invoker_impl<Ret, std::tuple<Inputs...>>;
+   using then_type = Ret (*)(wabt_apply_instance_vars&, const name&, Inputs..., const TypedValues&, int);
+
+   template<then_type Then>
+   static Ret translate_one(wabt_apply_instance_vars& vars, Inputs... rest, const TypedValues& args, int offset) {
+      uint64_t wasm_value = args.at((uint32_t)offset).get_i64();
+      auto value = name(wasm_value);
+      return Then(vars, value, rest..., args, (uint32_t)offset - 1);
+   }
+
+   template<then_type Then>
+   static const auto fn() {
+      return next_step::template fn<translate_one<Then>>();
+   }
+};
+
+/**
+ * Specialization for transcribing a reference to a fc::time_point_sec which can be passed as a native value
+ * This type transcribes into a native type which is loaded by value into a
+ * variable on the stack and then passed by reference to the intrinsic.
+ *
+ * @tparam Ret - the return type of the native method
+ * @tparam Inputs - the remaining native parameters to transcribe
+ */
+template<typename Ret, typename... Inputs>
+struct intrinsic_invoker_impl<Ret, std::tuple<const fc::time_point_sec&, Inputs...>> {
+   using next_step = intrinsic_invoker_impl<Ret, std::tuple<Inputs...>>;
+   using then_type = Ret (*)(wabt_apply_instance_vars&, const fc::time_point_sec&, Inputs..., const TypedValues&, int);
+   template<then_type Then>
+   static Ret translate_one(wabt_apply_instance_vars& vars, Inputs... rest, const TypedValues& args, int offset) {
+      uint32_t wasm_value = args.at((uint32_t)offset).get_i32();
+      auto value = fc::time_point_sec(wasm_value);
+      return Then(vars, value, rest..., args, (uint32_t)offset - 1);
+   }
+
+   template<then_type Then>
+   static const auto fn() {
+      return next_step::template fn<translate_one<Then>>();
+   }
+};
+
+
+/**
+ * Specialization for transcribing a reference type in the native method signature
+ * This type transcribes into an int32 pointer and checks the validity of that memory
+ * range before dispatching to the native method
+ *
+ * @tparam Ret - the return type of the native method
+ * @tparam Inputs - the remaining native parameters to transcribe
+ */
+template<typename T, typename Ret, typename... Inputs>
+struct intrinsic_invoker_impl<Ret, std::tuple<T &, Inputs...>> {
+   using next_step = intrinsic_invoker_impl<Ret, std::tuple<Inputs...>>;
+   using then_type = Ret (*)(wabt_apply_instance_vars&, T &, Inputs..., const TypedValues&, int);
+
+   template<then_type Then, typename U=T>
+   static auto translate_one(wabt_apply_instance_vars& vars, Inputs... rest, const TypedValues& args, int offset) -> std::enable_if_t<std::is_const<U>::value, Ret> {
+      // references cannot be created for null pointers
+      uint32_t ptr = args.at((uint32_t)offset).get_i32();
+      EOS_ASSERT(ptr != 0, binaryen_exception, "references cannot be created for null pointers");
+      T* base = array_ptr_impl<T>(vars, ptr, 1);
+      if ( reinterpret_cast<uintptr_t>(base) % alignof(T) != 0 ) {
+         wlog( "misaligned const reference" );
+         std::remove_const_t<T> copy;
+         T* copy_ptr = &copy;
+         memcpy( (void*)copy_ptr, (void*)base, sizeof(T) );
+         return Then(vars, *copy_ptr, rest..., args, (uint32_t)offset - 1);
+      }
+      return Then(vars, *base, rest..., args, (uint32_t)offset - 1);
+   }
+
+   template<then_type Then, typename U=T>
+   static auto translate_one(wabt_apply_instance_vars& vars, Inputs... rest, const TypedValues& args, int offset) -> std::enable_if_t<!std::is_const<U>::value, Ret> {
+      // references cannot be created for null pointers
+      uint32_t ptr = args.at((uint32_t)offset).get_i32();
+      EOS_ASSERT(ptr != 0, binaryen_exception, "references cannot be created for null pointers");
+      T* base = array_ptr_impl<T>(vars, ptr, 1);
+      if ( reinterpret_cast<uintptr_t>(base) % alignof(T) != 0 ) {
+         wlog( "misaligned reference" );
+         T copy;
+         memcpy( (void*)&copy, (void*)base, sizeof(T) );
+         Ret ret = Then(vars, copy, rest..., args, (uint32_t)offset - 1);
+         memcpy( (void*)base, (void*)&copy, sizeof(T) );
+         return ret;
+      }
+      return Then(vars, *base, rest..., args, (uint32_t)offset - 1);
+   }
+
+
+   template<then_type Then>
+   static const auto fn() {
+      return next_step::template fn<translate_one<Then>>();
+   }
+};
+
+extern apply_context* fixme_context;
+
+/**
+ * forward declaration of a wrapper class to call methods of the class
+ */
+template<typename Ret, typename MethodSig, typename Cls, typename... Params>
+struct intrinsic_function_invoker {
+   using impl = intrinsic_invoker_impl<Ret, std::tuple<Params...>>;
+
+   template<MethodSig Method>
+   static Ret wrapper(wabt_apply_instance_vars& vars, Params... params, const TypedValues&, int) {
+      class_from_wasm<Cls>::value(vars.ctx).checktime();
+      return (class_from_wasm<Cls>::value(vars.ctx).*Method)(params...);
+   }
+
+   template<MethodSig Method>
+   static const intrinsic_registrator::intrinsic_fn fn() {
+      return impl::template fn<wrapper<Method>>();
+   }
+};
+
+template<typename MethodSig, typename Cls, typename... Params>
+struct intrinsic_function_invoker<void_type, MethodSig, Cls, Params...> {
+   using impl = intrinsic_invoker_impl<void_type, std::tuple<Params...>>;
+
+   template<MethodSig Method>
+   static void_type wrapper(wabt_apply_instance_vars& vars, Params... params, const TypedValues& args, int offset) {
+      class_from_wasm<Cls>::value(vars.ctx).checktime();
+      (class_from_wasm<Cls>::value(vars.ctx).*Method)(params...);
+      return void_type();
+   }
+
+   template<MethodSig Method>
+   static const intrinsic_registrator::intrinsic_fn fn() {
+      return impl::template fn<wrapper<Method>>();
+   }
+
+};
+
+template<typename>
+struct intrinsic_function_invoker_wrapper;
+
+template<typename Cls, typename Ret, typename... Params>
+struct intrinsic_function_invoker_wrapper<Ret (Cls::*)(Params...)> {
+   using type = intrinsic_function_invoker<Ret, Ret (Cls::*)(Params...), Cls, Params...>;
+};
+
+template<typename Cls, typename Ret, typename... Params>
+struct intrinsic_function_invoker_wrapper<Ret (Cls::*)(Params...) const> {
+   using type = intrinsic_function_invoker<Ret, Ret (Cls::*)(Params...) const, Cls, Params...>;
+};
+
+template<typename Cls, typename Ret, typename... Params>
+struct intrinsic_function_invoker_wrapper<Ret (Cls::*)(Params...) volatile> {
+   using type = intrinsic_function_invoker<Ret, Ret (Cls::*)(Params...) volatile, Cls, Params...>;
+};
+
+template<typename Cls, typename Ret, typename... Params>
+struct intrinsic_function_invoker_wrapper<Ret (Cls::*)(Params...) const volatile> {
+   using type = intrinsic_function_invoker<Ret, Ret (Cls::*)(Params...) const volatile, Cls, Params...>;
+};
+
+#define __INTRINSIC_NAME(LABEL, SUFFIX) LABEL##SUFFIX
+#define _INTRINSIC_NAME(LABEL, SUFFIX) __INTRINSIC_NAME(LABEL,SUFFIX)
+
+#define _REGISTER_WABT_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\
+   static eosio::chain::webassembly::wabt_runtime::intrinsic_registrator _INTRINSIC_NAME(__wabt_intrinsic_fn, __COUNTER__) (\
+      MOD,\
+      NAME,\
+      eosio::chain::webassembly::wabt_runtime::wabt_function_type_provider<WASM_SIG>::type(),\
+      eosio::chain::webassembly::wabt_runtime::intrinsic_function_invoker_wrapper<SIG>::type::fn<&CLS::METHOD>()\
+   );\
+
+} } } }// eosio::chain::webassembly::wabt_runtime
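The pointer and reference specializations above all share one idiom: if a guest pointer is not suitably aligned for the native type, the value is bounced through an aligned stack temporary before the intrinsic sees it, and any mutation is copied back. A standalone, hedged illustration of just that technique (not code from the patch):

```cpp
// Minimal sketch of the alignment workaround used by the invoker
// specializations; assumes a T-typed value lives at `base` in guest memory.
#include <cstdint>
#include <cstring>

template<typename T>
void with_aligned_value( char* base, void (*use)(T&) ) {
   if ( reinterpret_cast<std::uintptr_t>(base) % alignof(T) != 0 ) {
      T copy;
      std::memcpy( &copy, base, sizeof(T) );   // read via an aligned temporary
      use( copy );
      std::memcpy( base, &copy, sizeof(T) );   // write any mutation back
   } else {
      use( *reinterpret_cast<T*>(base) );      // aligned: use in place
   }
}
```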
diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp
index 4049252f8f8..6d3176c7fb1 100644
--- a/libraries/chain/resource_limits.cpp
+++ b/libraries/chain/resource_limits.cpp
@@ -3,6 +3,7 @@
 #include
 #include
 #include
+#include
 #include

 namespace eosio { namespace chain { namespace resource_limits {
diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp
index 64683ebc049..dd58f0364ec 100644
--- a/libraries/chain/transaction_context.cpp
+++ b/libraries/chain/transaction_context.cpp
@@ -26,6 +26,9 @@ namespace eosio { namespace chain {
          undo_session = c.db().start_undo_session(true);
       }
       trace->id = id;
+      trace->block_num = c.pending_block_state()->block_num;
+      trace->block_time = c.pending_block_time();
+      trace->producer_block_id = c.pending_producer_block_id();
       executed.reserve( trx.total_actions() );
       EOS_ASSERT( trx.transaction_extensions.size() == 0, unsupported_feature, "we don't support any extensions yet" );
    }
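With the stamping added above, every transaction trace carries its block context. A small consumer-side sketch (not from the patch) of what that buys a subscriber:

```cpp
// Hedged sketch; field names come from the trace.hpp changes in this diff.
#include <eosio/chain/trace.hpp>
#include <fc/log/logger.hpp>

void log_trace_context( const eosio::chain::transaction_trace& t ) {
   // block context travels with the trace; no separate controller lookup needed
   ilog( "trx ${id} in block ${n} at ${bt}",
         ("id", t.id)("n", t.block_num)("bt", t.block_time) );
   if( t.producer_block_id )
      ilog( "producer block id: ${pid}", ("pid", *t.producer_block_id) );
}
```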
diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp
index abe5a745766..ac580045277 100644
--- a/libraries/chain/wasm_interface.cpp
+++ b/libraries/chain/wasm_interface.cpp
@@ -1913,6 +1913,8 @@ std::istream& operator>>(std::istream& in, wasm_interface::vm_type& runtime) {
       runtime = eosio::chain::wasm_interface::vm_type::wavm;
    else if (s == "binaryen")
       runtime = eosio::chain::wasm_interface::vm_type::binaryen;
+   else if (s == "wabt")
+      runtime = eosio::chain::wasm_interface::vm_type::wabt;
    else
       in.setstate(std::ios_base::failbit);
    return in;
diff --git a/libraries/chain/webassembly/wabt.cpp b/libraries/chain/webassembly/wabt.cpp
new file mode 100644
index 00000000000..bf5e1c9d6c8
--- /dev/null
+++ b/libraries/chain/webassembly/wabt.cpp
@@ -0,0 +1,99 @@
+#include <eosio/chain/webassembly/wabt.hpp>
+#include <eosio/chain/apply_context.hpp>
+#include <eosio/chain/wasm_eosio_constraints.hpp>
+
+//wabt includes
+#include <src/interp.h>
+#include <src/binary-reader-interp.h>
+#include <src/error-formatter.h>
+
+namespace eosio { namespace chain { namespace webassembly { namespace wabt_runtime {
+
+//yep 🤮
+static wabt_apply_instance_vars* static_wabt_vars;
+
+using namespace wabt;
+using namespace wabt::interp;
+namespace wasm_constraints = eosio::chain::wasm_constraints;
+
+class wabt_instantiated_module : public wasm_instantiated_module_interface {
+   public:
+      wabt_instantiated_module(std::unique_ptr<interp::Environment> e, std::vector<uint8_t> initial_mem, interp::DefinedModule* mod) :
+         _env(move(e)), _instatiated_module(mod), _initial_memory(initial_mem),
+         _executor(_env.get(), nullptr, Thread::Options(64*1024,
+                   wasm_constraints::maximum_call_depth+2))
+      {
+         for(Index i = 0; i < _env->GetGlobalCount(); ++i) {
+            if(_env->GetGlobal(i)->mutable_ == false)
+               continue;
+            _initial_globals.emplace_back(_env->GetGlobal(i), _env->GetGlobal(i)->typed_value);
+         }
+
+         if(_env->GetMemoryCount())
+            _initial_memory_configuration = _env->GetMemory(0)->page_limits;
+      }
+
+      void apply(apply_context& context) override {
+         //reset mutable globals
+         for(const auto& mg : _initial_globals)
+            mg.first->typed_value = mg.second;
+
+         wabt_apply_instance_vars this_run_vars{nullptr, context};
+         static_wabt_vars = &this_run_vars;
+
+         //reset memory to initial size & copy back in initial data
+         if(_env->GetMemoryCount()) {
+            Memory* memory = this_run_vars.memory = _env->GetMemory(0);
+            memory->page_limits = _initial_memory_configuration;
+            memory->data.resize(_initial_memory_configuration.initial * WABT_PAGE_SIZE);
+            memset(memory->data.data(), 0, memory->data.size());
+            memcpy(memory->data.data(), _initial_memory.data(), _initial_memory.size());
+         }
+
+         _params[0].set_i64(uint64_t(context.receiver));
+         _params[1].set_i64(uint64_t(context.act.account));
+         _params[2].set_i64(uint64_t(context.act.name));
+
+         ExecResult res = _executor.RunStartFunction(_instatiated_module);
+         EOS_ASSERT( res.result == interp::Result::Ok, wasm_execution_error, "wabt start function failure (${s})", ("s", ResultToString(res.result)) );
+
+         res = _executor.RunExportByName(_instatiated_module, "apply", _params);
+         EOS_ASSERT( res.result == interp::Result::Ok, wasm_execution_error, "wabt execution failure (${s})", ("s", ResultToString(res.result)) );
+      }
+
+   private:
+      std::unique_ptr<interp::Environment>          _env;
+      DefinedModule*                                _instatiated_module;   //this is owned by the Environment
+      std::vector<uint8_t>                          _initial_memory;
+      TypedValues                                   _params{3, TypedValue(Type::I64)};
+      std::vector<std::pair<Global*, TypedValue>>   _initial_globals;
+      Limits                                        _initial_memory_configuration;
+      Executor                                      _executor;
+};
+
+wabt_runtime::wabt_runtime() {}
+
+std::unique_ptr<wasm_instantiated_module_interface> wabt_runtime::instantiate_module(const char* code_bytes, size_t code_size, std::vector<uint8_t> initial_memory) {
+   std::unique_ptr<interp::Environment> env = std::make_unique<interp::Environment>();
+   for(auto it = intrinsic_registrator::get_map().begin() ; it != intrinsic_registrator::get_map().end(); ++it) {
+      interp::HostModule* host_module = env->AppendHostModule(it->first);
+      for(auto itf = it->second.begin(); itf != it->second.end(); ++itf) {
+         host_module->AppendFuncExport(itf->first, itf->second.sig, [fn=itf->second.func](const auto* f, const auto* fs, const auto& args, auto& res) {
+            TypedValue ret = fn(*static_wabt_vars, args);
+            if(ret.type != Type::Void)
+               res[0] = ret;
+            return interp::Result::Ok;
+         });
+      }
+   }
+
+   interp::DefinedModule* instantiated_module = nullptr;
+   wabt::Errors errors;
+
+   wabt::Result res = ReadBinaryInterp(env.get(), code_bytes, code_size, read_binary_options, &errors, &instantiated_module);
+   EOS_ASSERT( Succeeded(res), wasm_execution_error, "Error building wabt interp: ${e}", ("e", wabt::FormatErrorsToString(errors, Location::Type::Binary)) );
+
+   return std::make_unique<wabt_instantiated_module>(std::move(env), initial_memory, instantiated_module);
+}
+
+}}}}
diff --git a/libraries/chainbase b/libraries/chainbase
index 959cb4ddffe..9bfe5043f54 160000
--- a/libraries/chainbase
+++ b/libraries/chainbase
@@ -1 +1 @@
-Subproject commit 959cb4ddffe4316b8f6d7c1f9290400a52ae40c4
+Subproject commit 9bfe5043f5484e00f89387091e9b5beb90b88c62
diff --git a/libraries/fc b/libraries/fc
index 62a19a75868..4dc8375d7d3 160000
--- a/libraries/fc
+++ b/libraries/fc
@@ -1 +1 @@
-Subproject commit 62a19a758682679e3de27d956986eaf8b016465d
+Subproject commit 4dc8375d7d3e02ab1177ab5c22835f75b45c845a
diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp
index d502adefd15..38e0827cdb1 100644
--- a/libraries/testing/include/eosio/testing/tester.hpp
+++ b/libraries/testing/include/eosio/testing/tester.hpp
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include
 #include

@@ -308,14 +309,17 @@ namespace eosio { namespace testing {
       try {
          if( num_blocks_to_producer_before_shutdown > 0 )
            produce_blocks( num_blocks_to_producer_before_shutdown );
-         BOOST_REQUIRE_EQUAL( validate(), true );
+         if (!skip_validate)
+            BOOST_REQUIRE_EQUAL( validate(), true );
       } catch( const fc::exception& e ) {
          wdump((e.to_detail_string()));
       }
    }

    controller::config vcfg;

-   validating_tester() {
+   static controller::config default_config() {
+      fc::temp_directory tempdir;
+      controller::config vcfg;
       vcfg.blocks_dir      = tempdir.path() / std::string("v_").append(config::default_blocks_dir_name);
       vcfg.state_dir       = tempdir.path() / std::string("v_").append(config::default_state_dir_name);
       vcfg.state_size = 1024*1024*8;
@@ -332,8 +336,16 @@ namespace eosio { namespace testing {
            vcfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen;
         else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm"))
            vcfg.wasm_runtime = chain::wasm_interface::vm_type::wavm;
+        else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wabt"))
+           vcfg.wasm_runtime = chain::wasm_interface::vm_type::wabt;
       }

+      return vcfg;
+   }
+
+   validating_tester(const flat_set<account_name>& trusted_producers = flat_set<account_name>()) {
+      vcfg = default_config();
+      vcfg.trusted_producers = trusted_producers;
+
       validating_node = std::make_unique<controller>(vcfg);
       validating_node->startup();

@@ -362,6 +374,14 @@ namespace eosio { namespace testing {
        return sb;
     }

+    signed_block_ptr produce_block_no_validation( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0 /*skip_missed_block_penalty*/ ) {
+       return _produce_block(skip_time, false, skip_flag | 2);
+    }
+
+    void validate_push_block(const signed_block_ptr& sb) {
+       validating_node->push_block( sb );
+    }
+
     signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0 /*skip_missed_block_penalty*/ )override {
        control->abort_block();
        auto sb = _produce_block(skip_time, true, skip_flag | 2);
@@ -393,6 +413,7 @@ namespace eosio { namespace testing {

    unique_ptr<controller>   validating_node;
    uint32_t                 num_blocks_to_producer_before_shutdown = 0;
+   bool                     skip_validate = false;
 };

 /**
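A hedged usage sketch for the new `validating_tester` hooks: produce a block on the main node without the implicit validation push, then hand it to the validating node explicitly. The test body is illustrative only.

```cpp
// Sketch; assumes the tester.hpp additions from this patch.
#include <eosio/testing/tester.hpp>

void produce_and_validate( eosio::testing::validating_tester& t ) {
   auto b = t.produce_block_no_validation();   // skips the automatic push
   t.validate_push_block( b );                 // validating node applies it here
}
```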
diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp
index 051d598d2a6..19faea3e420 100644
--- a/libraries/testing/tester.cpp
+++ b/libraries/testing/tester.cpp
@@ -100,6 +100,8 @@ namespace eosio { namespace testing {
            cfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen;
         else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm"))
            cfg.wasm_runtime = chain::wasm_interface::vm_type::wavm;
+        else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wabt"))
+           cfg.wasm_runtime = chain::wasm_interface::vm_type::wabt;
         else
            cfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen;
      }
diff --git a/libraries/wabt b/libraries/wabt
new file mode 160000
index 00000000000..2f5382661f7
--- /dev/null
+++ b/libraries/wabt
@@ -0,0 +1 @@
+Subproject commit 2f5382661f7bf77cf7a70dcf0543a44fd5025910
diff --git a/plugins/CMakeLists.txt b/plugins/CMakeLists.txt
index 06b0162fe5b..9b0b17b9d0a 100644
--- a/plugins/CMakeLists.txt
+++ b/plugins/CMakeLists.txt
@@ -17,6 +17,8 @@ add_subdirectory(db_size_api_plugin)
 #add_subdirectory(faucet_testnet_plugin)
 add_subdirectory(mongo_db_plugin)
 add_subdirectory(login_plugin)
+add_subdirectory(test_control_plugin)
+add_subdirectory(test_control_api_plugin)

 # Forward variables to top level so packaging picks them up
 set(CPACK_DEBIAN_PACKAGE_DEPENDS ${CPACK_DEBIAN_PACKAGE_DEPENDS} PARENT_SCOPE)
diff --git a/plugins/COMMUNITY.md b/plugins/COMMUNITY.md
new file mode 100644
index 00000000000..d7baf7e4a35
--- /dev/null
+++ b/plugins/COMMUNITY.md
@@ -0,0 +1,14 @@
+# Community Plugin List
+
+This file lists community-authored plugins for `nodeos`, acting as a directory of the plugins that are available.
+
+Third parties are encouraged to make pull requests to this file (against the `develop` branch, please) in order to list new plugins.
+
+| Description | URL |
+| ----------- | --- |
+| Watch for specific actions and send them to an HTTP URL | https://github.com/eosauthority/eosio-watcher-plugin |
+| Kafka | https://github.com/TP-Lab/kafka_plugin |
+
+## DISCLAIMER:
+
+The fact that a plugin is listed in this file does not mean it has been reviewed by this repository's maintainers. No warranties are made; if you choose to use these plugins, you do so at your own risk.
diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp
index 58098501f04..31e576ae4d8 100644
--- a/plugins/chain_api_plugin/chain_api_plugin.cpp
+++ b/plugins/chain_api_plugin/chain_api_plugin.cpp
@@ -85,9 +85,12 @@ void chain_api_plugin::plugin_startup() {
       CHAIN_RO_CALL(get_block_header_state, 200),
       CHAIN_RO_CALL(get_account, 200),
       CHAIN_RO_CALL(get_code, 200),
+      CHAIN_RO_CALL(get_code_hash, 200),
       CHAIN_RO_CALL(get_abi, 200),
       CHAIN_RO_CALL(get_raw_code_and_abi, 200),
+      CHAIN_RO_CALL(get_raw_abi, 200),
       CHAIN_RO_CALL(get_table_rows, 200),
+      CHAIN_RO_CALL(get_table_by_scope, 200),
       CHAIN_RO_CALL(get_currency_balance, 200),
       CHAIN_RO_CALL(get_currency_stats, 200),
       CHAIN_RO_CALL(get_producers, 200),
diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp
index 67efb591611..183e7a48ecc 100644
--- a/plugins/chain_plugin/chain_plugin.cpp
+++ b/plugins/chain_plugin/chain_plugin.cpp
@@ -212,7 +212,7 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip
         ("blocks-dir", bpo::value<bfs::path>()->default_value("blocks"),
          "the location of the blocks directory (absolute path or relative to application data dir)")
         ("checkpoint", bpo::value<vector<string>>()->composing(), "Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.")
-         ("wasm-runtime", bpo::value<eosio::chain::wasm_interface::vm_type>()->value_name("wavm/binaryen"), "Override default WASM runtime")
+         ("wasm-runtime", bpo::value<eosio::chain::wasm_interface::vm_type>()->value_name("wavm/binaryen/wabt"), "Override default WASM runtime")
         ("abi-serializer-max-time-ms", bpo::value<uint32_t>()->default_value(config::default_abi_serializer_max_time_ms),
          "Override default maximum ABI serialization time allowed in ms")
         ("chain-state-db-size-mb", bpo::value<uint64_t>()->default_value(config::default_state_size / (1024  * 1024)), "Maximum size (in MiB) of the chain state database")
@@ -283,6 +283,7 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip
          "replace reversible block database with blocks imported from specified file and then exit")
         ("export-reversible-blocks", bpo::value<bfs::path>(),
          "export reversible block database in portable format into specified file and then exit")
+         ("trusted-producer", bpo::value<vector<string>>()->composing(), "Indicate a producer whose signed block headers will be fully validated, while transactions in those validated blocks will be trusted.")
         ;
 }
@@ -313,6 +314,17 @@ fc::time_point calculate_genesis_timestamp( string tstr ) {
    return genesis_timestamp;
 }

+void clear_directory_contents( const fc::path& p ) {
+   using boost::filesystem::directory_iterator;
+
+   if( !fc::is_directory( p ) )
+      return;
+
+   for( directory_iterator enditr, itr{p}; itr != enditr; ++itr ) {
+      fc::remove_all( itr->path() );
+   }
+}
+
 void chain_plugin::plugin_initialize(const variables_map& options) {
    ilog("initializing chain plugin");

@@ -332,6 +344,8 @@ void chain_plugin::plugin_initialize(const variables_map& options) {
       LOAD_VALUE_SET( options, "contract-whitelist", my->chain_config->contract_whitelist );
       LOAD_VALUE_SET( options, "contract-blacklist", my->chain_config->contract_blacklist );

+      LOAD_VALUE_SET( options, "trusted-producer", my->chain_config->trusted_producers );
+
       if( options.count( "action-blacklist" )) {
          const std::vector<std::string>& acts = options["action-blacklist"].as<std::vector<std::string>>();
          auto& list = my->chain_config->action_blacklist;
@@ -456,11 +470,11 @@ void chain_plugin::plugin_initialize(const variables_map& options) {
            ilog( "Deleting state database and blocks" );
            if( options.at( "truncate-at-block" ).as<uint32_t>() > 0 )
               wlog( "The --truncate-at-block option does not make sense when deleting all blocks." );
-            fc::remove_all( my->chain_config->state_dir );
+            clear_directory_contents( my->chain_config->state_dir );
            fc::remove_all( my->blocks_dir );
         } else if( options.at( "hard-replay-blockchain" ).as<bool>()) {
            ilog( "Hard replay requested: deleting state database" );
-            fc::remove_all( my->chain_config->state_dir );
+            clear_directory_contents( my->chain_config->state_dir );
            auto backup_dir = block_log::repair_log( my->blocks_dir, options.at( "truncate-at-block" ).as<uint32_t>());
            if( fc::exists( backup_dir / config::reversible_blocks_dir_name ) ||
                options.at( "fix-reversible-blocks" ).as<bool>()) {
@@ -482,7 +496,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) {
            ilog( "Replay requested: deleting state database" );
            if( options.at( "truncate-at-block" ).as<uint32_t>() > 0 )
               wlog( "The --truncate-at-block option does not work for a regular replay of the blockchain." );
-            fc::remove_all( my->chain_config->state_dir );
+            clear_directory_contents( my->chain_config->state_dir );
            if( options.at( "fix-reversible-blocks" ).as<bool>()) {
               if( !recover_reversible_blocks( my->chain_config->blocks_dir / config::reversible_blocks_dir_name,
                                               my->chain_config->reversible_cache_size )) {
@@ -1104,6 +1118,48 @@ read_only::get_table_rows_result read_only::get_table_rows( const read_only::get
    }
 }

+read_only::get_table_by_scope_result read_only::get_table_by_scope( const read_only::get_table_by_scope_params& p )const {
+   const auto& d = db.db();
+   const auto& idx = d.get_index<chain::table_id_multi_index, chain::by_code_scope_table>();
+   decltype(idx.lower_bound(boost::make_tuple(0, 0, 0))) lower;
+   decltype(idx.upper_bound(boost::make_tuple(0, 0, 0))) upper;
+
+   if (p.lower_bound.size()) {
+      uint64_t scope = convert_to_type<uint64_t>(p.lower_bound, "lower_bound scope");
+      lower = idx.lower_bound( boost::make_tuple(p.code, scope, p.table));
+   } else {
+      lower = idx.lower_bound(boost::make_tuple(p.code, 0, p.table));
+   }
+   if (p.upper_bound.size()) {
+      uint64_t scope = convert_to_type<uint64_t>(p.upper_bound, "upper_bound scope");
+      upper = idx.lower_bound( boost::make_tuple(p.code, scope, 0));
+   } else {
+      upper = idx.lower_bound(boost::make_tuple((uint64_t)p.code + 1, 0, 0));
+   }
+
+   auto end = fc::time_point::now() + fc::microseconds(1000 * 10); /// 10ms max time
+   unsigned int count = 0;
+   auto itr = lower;
+   read_only::get_table_by_scope_result result;
+   for (; itr != upper; ++itr) {
+      if (p.table && itr->table != p.table) {
+         if (fc::time_point::now() > end) {
+            break;
+         }
+         continue;
+      }
+      result.rows.push_back({itr->code, itr->scope, itr->table, itr->payer, itr->count});
+      if (++count == p.limit || fc::time_point::now() > end) {
+         ++itr;
+         break;
+      }
+   }
+   if (itr != upper) {
+      result.more = (string)itr->scope;
+   }
+   return result;
+}
+
 vector<asset> read_only::get_currency_balance( const read_only::get_currency_balance_params& p )const {

    const abi_def abi = eosio::chain_apis::get_abi( db, p.code );
@@ -1499,6 +1555,19 @@ read_only::get_code_results read_only::get_code( const get_code_params& params )
    return result;
 }

+read_only::get_code_hash_results read_only::get_code_hash( const get_code_hash_params& params )const {
+   get_code_hash_results result;
+   result.account_name = params.account_name;
+   const auto& d = db.db();
+   const auto& accnt = d.get<account_object, by_name>( params.account_name );
+
+   if( accnt.code.size() ) {
+      result.code_hash = fc::sha256::hash( accnt.code.data(), accnt.code.size() );
+   }
+
+   return result;
+}
+
 read_only::get_raw_code_and_abi_results read_only::get_raw_code_and_abi( const get_raw_code_and_abi_params& params)const {
    get_raw_code_and_abi_results result;
    result.account_name = params.account_name;
@@ -1511,6 +1580,20 @@ read_only::get_raw_code_and_abi_results read_only::get_raw_code_and_abi( const g
    return result;
 }

+read_only::get_raw_abi_results read_only::get_raw_abi( const get_raw_abi_params& params )const {
+   get_raw_abi_results result;
+   result.account_name = params.account_name;
+
+   const auto& d = db.db();
+   const auto& accnt = d.get<account_object, by_name>(params.account_name);
+   result.abi_hash = fc::sha256::hash( accnt.abi.data(), accnt.abi.size() );
+   result.code_hash = fc::sha256::hash( accnt.code.data(), accnt.code.size() );
+   if( !params.abi_hash || *params.abi_hash != result.abi_hash )
+      result.abi = blob{{accnt.abi.begin(), accnt.abi.end()}};
+
+   return result;
+}
+
 read_only::get_account_results read_only::get_account( const get_account_params& params )const {
    get_account_results result;
    result.account_name = params.account_name;
@@ -1561,16 +1644,18 @@ read_only::get_account_results read_only::get_account( const get_account_params&
    const auto token_code = N(eosio.token);

+   auto core_symbol = extract_core_symbol();
+
    const auto* t_id = d.find<chain::table_id_object, chain::by_code_scope_table>(boost::make_tuple( token_code, params.account_name, N(accounts) ));
    if( t_id != nullptr ) {
       const auto &idx = d.get_index<chain::key_value_index, chain::by_scope_primary>();
-      auto it = idx.find(boost::make_tuple( t_id->id, symbol().to_symbol_code() ));
+      auto it = idx.find(boost::make_tuple( t_id->id, core_symbol.to_symbol_code() ));
       if( it != idx.end() && it->value.size() >= sizeof(asset) ) {
          asset bal;
          fc::datastream<const char *> ds(it->value.data(), it->value.size());
          fc::raw::unpack(ds, bal);

-         if( bal.get_symbol().valid() && bal.get_symbol() == symbol() ) {
+         if( bal.get_symbol().valid() && bal.get_symbol() == core_symbol ) {
             result.core_liquid_balance = bal;
          }
       }
    }
@@ -1683,6 +1768,46 @@ read_only::get_transaction_id_result read_only::get_transaction_id( const read_o
    return params.id();
 }

+namespace detail {
+   struct ram_market_exchange_state_t {
+      asset  ignore1;
+      asset  ignore2;
+      double ignore3;
+      asset  core_symbol;
+      double ignore4;
+   };
+}
+
+chain::symbol read_only::extract_core_symbol()const {
+   symbol core_symbol; // Default to CORE_SYMBOL if the appropriate data structure cannot be found in the system contract table data
+
+   // The following code makes assumptions about the contract deployed on the eosio account (i.e. the system contract) and how it stores its data.
+   const auto& d = db.db();
+   const auto* t_id = d.find<chain::table_id_object, chain::by_code_scope_table>(boost::make_tuple( N(eosio), N(eosio), N(rammarket) ));
+   if( t_id != nullptr ) {
+      const auto &idx = d.get_index<chain::key_value_index, chain::by_scope_primary>();
+      auto it = idx.find(boost::make_tuple( t_id->id, eosio::chain::string_to_symbol_c(4,"RAMCORE") ));
+      if( it != idx.end() ) {
+         detail::ram_market_exchange_state_t ram_market_exchange_state;
+
+         fc::datastream<const char *> ds( it->value.data(), it->value.size() );
+
+         try {
+            fc::raw::unpack(ds, ram_market_exchange_state);
+         } catch( ... ) {
+            return core_symbol;
+         }
+
+         if( ram_market_exchange_state.core_symbol.get_symbol().valid() ) {
+            core_symbol = ram_market_exchange_state.core_symbol.get_symbol();
+         }
+      }
+   }
+
+   return core_symbol;
+}

 } // namespace chain_apis
 } // namespace eosio
+
+FC_REFLECT( eosio::chain_apis::detail::ram_market_exchange_state_t, (ignore1)(ignore2)(ignore3)(core_symbol)(ignore4) )
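An illustrative call into the new `get_table_by_scope` read-only API; the contract and table names and the `ro` instance are assumptions for the sketch. Over HTTP this corresponds to POSTing `{"code":"eosio.token","table":"accounts","limit":10}` to `/v1/chain/get_table_by_scope`.

```cpp
// Hedged sketch; uses only types declared in chain_plugin.hpp below.
#include <eosio/chain_plugin/chain_plugin.hpp>

void list_token_scopes( const eosio::chain_apis::read_only& ro ) {
   eosio::chain_apis::read_only::get_table_by_scope_params params;
   params.code  = N(eosio.token);   // mandatory: contract whose tables to scan
   params.table = N(accounts);      // optional filter on table name
   params.limit = 10;
   auto res = ro.get_table_by_scope( params );
   for( const auto& row : res.rows )
      ilog( "scope ${s} has ${c} rows", ("s", row.scope)("c", row.count) );
   // res.more, when non-empty, is the scope to feed back as lower_bound
   // to fetch the next page.
}
```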
eosio::chain_apis::read_only::get_account_results, (core_liquid_balance)(ram_quota)(net_weight)(cpu_weight)(net_limit)(cpu_limit)(ram_usage)(permissions) (total_resources)(self_delegated_bandwidth)(refund_request)(voter_info) ) FC_REFLECT( eosio::chain_apis::read_only::get_code_results, (account_name)(code_hash)(wast)(wasm)(abi) ) +FC_REFLECT( eosio::chain_apis::read_only::get_code_hash_results, (account_name)(code_hash) ) FC_REFLECT( eosio::chain_apis::read_only::get_abi_results, (account_name)(abi) ) FC_REFLECT( eosio::chain_apis::read_only::get_account_params, (account_name) ) FC_REFLECT( eosio::chain_apis::read_only::get_code_params, (account_name)(code_as_wasm) ) +FC_REFLECT( eosio::chain_apis::read_only::get_code_hash_params, (account_name) ) FC_REFLECT( eosio::chain_apis::read_only::get_abi_params, (account_name) ) FC_REFLECT( eosio::chain_apis::read_only::get_raw_code_and_abi_params, (account_name) ) FC_REFLECT( eosio::chain_apis::read_only::get_raw_code_and_abi_results, (account_name)(wasm)(abi) ) +FC_REFLECT( eosio::chain_apis::read_only::get_raw_abi_params, (account_name)(abi_hash) ) +FC_REFLECT( eosio::chain_apis::read_only::get_raw_abi_results, (account_name)(code_hash)(abi_hash)(abi) ) FC_REFLECT( eosio::chain_apis::read_only::producer_info, (producer_name) ) FC_REFLECT( eosio::chain_apis::read_only::abi_json_to_bin_params, (code)(action)(args) ) FC_REFLECT( eosio::chain_apis::read_only::abi_json_to_bin_result, (binargs) ) diff --git a/plugins/history_plugin/history_plugin.cpp b/plugins/history_plugin/history_plugin.cpp index f6505beaa92..3c3650b9e59 100644 --- a/plugins/history_plugin/history_plugin.cpp +++ b/plugins/history_plugin/history_plugin.cpp @@ -10,7 +10,7 @@ #include #include -namespace eosio { +namespace eosio { using namespace chain; using boost::signals2::scoped_connection; @@ -51,7 +51,7 @@ namespace eosio { indexed_by< ordered_unique, member>, ordered_unique, member>, - ordered_unique, + ordered_unique, composite_key< action_history_object, member, member @@ -64,7 +64,7 @@ namespace eosio { account_history_object, indexed_by< ordered_unique, member>, - ordered_unique, + ordered_unique, composite_key< account_history_object, member, member @@ -213,7 +213,7 @@ namespace eosio { uint64_t asn = 0; if( itr != idx.begin() ) --itr; - if( itr->account == n ) + if( itr->account == n ) asn = itr->account_sequence_num + 1; //idump((n)(act.receipt.global_sequence)(asn)); @@ -268,7 +268,7 @@ namespace eosio { aho.block_time = chain.pending_block_time(); aho.trx_id = at.trx_id; }); - + auto aset = account_set( at ); for( auto a : aset ) { record_account_action( a, at ); @@ -313,7 +313,7 @@ namespace eosio { if( options.count( "filter-on" )) { auto fo = options.at( "filter-on" ).as>(); for( auto& s : fo ) { - if( s == "*" ) { + if( s == "*" || s == "\"*\"" ) { my->bypass_filter = true; wlog( "--filter-on * enabled. This can fill shared_mem, causing nodeos to stop." 
);
            break;
@@ -366,7 +366,7 @@ namespace eosio {
 
-   namespace history_apis {
+   namespace history_apis {
      read_only::get_actions_result read_only::get_actions( const read_only::get_actions_params& params )const {
        edump((params));
        auto& chain = history->chain_plug->chain();
@@ -388,7 +388,7 @@ namespace eosio {
              pos = itr->account_sequence_num+1;
           } else if( itr != idx.begin() ) --itr;
 
-          if( itr->account == n )
+          if( itr->account == n )
              pos = itr->account_sequence_num + 1;
        }
 
@@ -440,13 +440,31 @@ namespace eosio {
      read_only::get_transaction_result read_only::get_transaction( const read_only::get_transaction_params& p )const {
        auto& chain = history->chain_plug->chain();
        const auto abi_serializer_max_time = history->chain_plug->get_abi_serializer_max_time();
-       auto short_id = fc::variant(p.id).as_string().substr(0,8);
+
+       transaction_id_type input_id;
+       auto input_id_length = p.id.size();
+       try {
+          FC_ASSERT( input_id_length <= 64, "hex string is too long to represent an actual transaction id" );
+          FC_ASSERT( input_id_length >= 8, "hex string representing transaction id should be at least 8 characters long to avoid excessive collisions" );
+          input_id = transaction_id_type(p.id);
+       } EOS_RETHROW_EXCEPTIONS(transaction_id_type_exception, "Invalid transaction ID: ${transaction_id}", ("transaction_id", p.id))
+
+       auto txn_id_matched = [&input_id, input_id_size = input_id_length/2, no_half_byte_at_end = (input_id_length % 2 == 0)]
+                             ( const transaction_id_type &id ) -> bool // hex prefix comparison
+       {
+          bool whole_byte_prefix_matches = memcmp( input_id.data(), id.data(), input_id_size ) == 0;
+          if( !whole_byte_prefix_matches || no_half_byte_at_end )
+             return whole_byte_prefix_matches;
+
+          // check if half byte at end of specified part of input_id matches
+          return (*(input_id.data() + input_id_size) & 0xF0) == (*(id.data() + input_id_size) & 0xF0);
+       };
 
        const auto& db = chain.db();
        const auto& idx = db.get_index<action_history_index, by_trx_id>();
-       auto itr = idx.lower_bound( boost::make_tuple(p.id) );
+       auto itr = idx.lower_bound( boost::make_tuple( input_id ) );
 
-       bool in_history = (itr != idx.end() && fc::variant(itr->trx_id).as_string().substr(0,8) == short_id );
+       bool in_history = (itr != idx.end() && txn_id_matched(itr->trx_id) );
 
        if( !in_history && !p.block_num_hint ) {
          EOS_THROW(tx_not_found, "Transaction ${id} not found in history and no block hint was given", ("id",p.id));
@@ -454,12 +472,9 @@
 
        get_transaction_result result;
 
-       if (in_history) {
-          result.id = p.id;
-          result.last_irreversible_block = chain.last_irreversible_block_num();
-
-
+       if( in_history ) {
          result.id = itr->trx_id;
+         result.last_irreversible_block = chain.last_irreversible_block_num();
          result.block_num = itr->block_num;
          result.block_time = itr->block_time;
@@ -509,7 +524,7 @@
               if (receipt.trx.contains<packed_transaction>()) {
                  auto& pt = receipt.trx.get<packed_transaction>();
                  auto mtrx = transaction_metadata(pt);
-                 if (fc::variant(mtrx.id).as_string().substr(0, 8) == short_id) {
+                 if( txn_id_matched(mtrx.id) ) {
                     result.id = mtrx.id;
                     result.last_irreversible_block = chain.last_irreversible_block_num();
                     result.block_num = *p.block_num_hint;
@@ -522,7 +537,7 @@
                  }
               } else {
                  auto& id = receipt.trx.get<transaction_id_type>();
-                 if (fc::variant(id).as_string().substr(0, 8) == short_id) {
+                 if( txn_id_matched(id) ) {
                     result.id = id;
                     result.last_irreversible_block = chain.last_irreversible_block_num();
                     result.block_num = *p.block_num_hint;
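The new lookup accepts any even- or odd-length hex prefix of at least 8 characters rather than exactly the first 8. The odd-length case is the subtle part: the final hex digit only constrains the high nibble of its byte. A self-contained sketch of the same comparison over already-decoded bytes (standalone names, not from this diff):

    #include <cstdint>
    #include <cstring>

    // True if the first `hex_digits` hex characters' worth of `prefix` match `id`.
    // Both pointers reference raw (hex-decoded) bytes.
    bool hex_prefix_matches( const uint8_t* prefix, const uint8_t* id, size_t hex_digits ) {
       size_t whole_bytes = hex_digits / 2;
       if( memcmp( prefix, id, whole_bytes ) != 0 ) return false;
       if( hex_digits % 2 == 0 ) return true;  // even length: whole bytes suffice
       // odd length: compare only the high nibble of the trailing half byte
       return (prefix[whole_bytes] & 0xF0) == (id[whole_bytes] & 0xF0);
    }
    // e.g. the prefix "abc" (3 hex digits) matches any id whose bytes start 0xab 0xc?.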
diff --git a/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp b/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp
index 402c0e3966d..b6801b30a29 100644
--- a/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp
+++ b/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp
@@ -30,18 +30,6 @@ class read_only {
            : history(history) {}
 
-      /*
-      struct get_transaction_params {
-         chain::transaction_id_type transaction_id;
-      };
-      struct get_transaction_results {
-         chain::transaction_id_type transaction_id;
-         fc::variant transaction;
-      };
-      get_transaction_results get_transaction(const get_transaction_params& params) const;
-      */
-
-
      struct get_actions_params {
         chain::account_name account_name;
         optional<int32_t> pos; /// an absolute sequence position; -1 is the end/last action
@@ -67,7 +55,7 @@ class read_only {
 
      struct get_transaction_params {
-        transaction_id_type id;
+        string id;
        optional<uint32_t> block_num_hint;
      };
 
@@ -81,7 +69,7 @@ class read_only {
      };
 
      get_transaction_result get_transaction( const get_transaction_params& )const;
-
+
@@ -120,13 +108,13 @@ class read_only {
 
 /**
  *  This plugin tracks all actions and keys associated with a set of configured accounts. It enables
- *  wallets to paginate queries for history.
+ *  wallets to paginate queries for history.
  *
  *  An action will be included in the account's history if any of the following:
  *     - receiver
  *     - any account named in auth list
  *
- *  A key will be linked to an account if the key is referenced in authorities of updateauth or newaccount
+ *  A key will be linked to an account if the key is referenced in authorities of updateauth or newaccount
  */
 class history_plugin : public plugin<history_plugin> {
    public:
diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp
index c971f9dd040..6a380da0d03 100644
--- a/plugins/http_plugin/http_plugin.cpp
+++ b/plugins/http_plugin/http_plugin.cpp
@@ -3,6 +3,7 @@
  *  @copyright defined in eos/LICENSE.txt
  */
 #include
+#include <eosio/http_plugin/local_endpoint.hpp>
 #include
 #include
 
@@ -43,6 +44,11 @@ namespace eosio {
    using std::shared_ptr;
    using websocketpp::connection_hdl;
 
+   static http_plugin_defaults current_http_plugin_defaults;
+
+   void http_plugin::set_defaults(const http_plugin_defaults config) {
+      current_http_plugin_defaults = config;
+   }
 
    namespace detail {
 
@@ -79,9 +85,42 @@ namespace eosio {
 
           static const long timeout_open_handshake = 0;
       };
+
+      struct asio_local_with_stub_log : public websocketpp::config::asio {
+          typedef asio_local_with_stub_log type;
+          typedef asio base;
+
+          typedef base::concurrency_type concurrency_type;
+
+          typedef base::request_type request_type;
+          typedef base::response_type response_type;
+
+          typedef base::message_type message_type;
+          typedef base::con_msg_manager_type con_msg_manager_type;
+          typedef base::endpoint_msg_manager_type endpoint_msg_manager_type;
+
+          typedef websocketpp::log::stub elog_type;
+          typedef websocketpp::log::stub alog_type;
+
+          typedef base::rng_type rng_type;
+
+          struct transport_config : public base::transport_config {
+              typedef type::concurrency_type concurrency_type;
+              typedef type::alog_type alog_type;
+              typedef type::elog_type elog_type;
+              typedef type::request_type request_type;
+              typedef type::response_type response_type;
+              typedef websocketpp::transport::asio::basic_socket::local_endpoint socket_type;
+          };
+
+          typedef websocketpp::transport::asio::local_endpoint<transport_config> transport_type;
+
+          static const long timeout_open_handshake = 0;
+      };
   }
 
   using websocket_server_type = websocketpp::server<detail::asio_with_stub_log<websocketpp::transport::asio::basic_socket::endpoint>>;
+  using websocket_local_server_type = websocketpp::server<detail::asio_local_with_stub_log>;
   using websocket_server_tls_type =
websocketpp::server>; using ssl_context_ptr = websocketpp::lib::shared_ptr; @@ -105,9 +144,16 @@ namespace eosio { websocket_server_tls_type https_server; + optional unix_endpoint; + websocket_local_server_type unix_server; + bool validate_host; set valid_hosts; + string unix_socket_path_option_name = "unix-socket-path"; + string http_server_address_option_name = "http-server-address"; + string https_server_address_option_name = "https-server-address"; + bool host_port_is_valid( const std::string& header_host_port, const string& endpoint_local_host_port ) { return !validate_host || header_host_port == endpoint_local_host_port || valid_hosts.find(header_host_port) != valid_hosts.end(); } @@ -163,7 +209,7 @@ namespace eosio { } template - static void handle_exception(typename websocketpp::server>::connection_ptr con) { + static void handle_exception(typename websocketpp::server::connection_ptr con) { string err = "Internal Service error, http: "; try { con->set_status( websocketpp::http::status_code::internal_server_error ); @@ -195,18 +241,26 @@ namespace eosio { } template - void handle_http_request(typename websocketpp::server>::connection_ptr con) { - try { - bool is_secure = con->get_uri()->get_secure(); - const auto& local_endpoint = con->get_socket().lowest_layer().local_endpoint(); - auto local_socket_host_port = local_endpoint.address().to_string() + ":" + std::to_string(local_endpoint.port()); + bool allow_host(const typename T::request_type& req, typename websocketpp::server::connection_ptr con) { + bool is_secure = con->get_uri()->get_secure(); + const auto& local_endpoint = con->get_socket().lowest_layer().local_endpoint(); + auto local_socket_host_port = local_endpoint.address().to_string() + ":" + std::to_string(local_endpoint.port()); + + const auto& host_str = req.get_header("Host"); + if (host_str.empty() || !host_is_valid(host_str, local_socket_host_port, is_secure)) { + con->set_status(websocketpp::http::status_code::bad_request); + return false; + } + return true; + } + template + void handle_http_request(typename websocketpp::server::connection_ptr con) { + try { auto& req = con->get_request(); - const auto& host_str = req.get_header("Host"); - if (host_str.empty() || !host_is_valid(host_str, local_socket_host_port, is_secure)) { - con->set_status(websocketpp::http::status_code::bad_request); + + if(!allow_host(req, con)) return; - } if( !access_control_allow_origin.empty()) { con->append_header( "Access-Control-Allow-Origin", access_control_allow_origin ); @@ -258,7 +312,7 @@ namespace eosio { ws.set_reuse_addr(true); ws.set_max_http_body_size(max_body_size); ws.set_http_handler([&](connection_hdl hdl) { - handle_http_request(ws.get_con_from_hdl(hdl)); + handle_http_request>(ws.get_con_from_hdl(hdl)); }); } catch ( const fc::exception& e ){ elog( "http: ${e}", ("e",e.to_detail_string())); @@ -275,17 +329,41 @@ namespace eosio { valid_hosts.emplace(host + ":" + resolved_port_str); } + void mangle_option_names() { + if(current_http_plugin_defaults.address_config_prefix.empty()) + return; + unix_socket_path_option_name.insert(0, current_http_plugin_defaults.address_config_prefix+"-"); + http_server_address_option_name.insert(0, current_http_plugin_defaults.address_config_prefix+"-"); + https_server_address_option_name.insert(0, current_http_plugin_defaults.address_config_prefix+"-"); + } }; + template<> + bool http_plugin_impl::allow_host(const detail::asio_local_with_stub_log::request_type& req, websocketpp::server::connection_ptr con) { + return true; + } + 
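Everything below keys off current_http_plugin_defaults, so the embedding application controls the option names and their defaults by calling set_defaults before plugin initialization. A hedged sketch of such a caller (the values, and the keosd-style use case, are illustrative rather than taken from this diff):

    #include <eosio/http_plugin/http_plugin.hpp>

    void configure_http_defaults() {
       eosio::http_plugin_defaults defaults;
       defaults.address_config_prefix    = "";           // keep plain option names
       defaults.default_unix_socket_path = "keosd.sock"; // resolved relative to data-dir
       defaults.default_http_port       = 0;             // no TCP listener unless configured
       eosio::http_plugin::set_defaults(defaults);       // must run before initialize
    }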
http_plugin::http_plugin():my(new http_plugin_impl()){} http_plugin::~http_plugin(){} void http_plugin::set_program_options(options_description&, options_description& cfg) { - cfg.add_options() - ("http-server-address", bpo::value()->default_value("127.0.0.1:8888"), - "The local IP and port to listen for incoming http connections; set blank to disable.") + my->mangle_option_names(); + if(current_http_plugin_defaults.default_unix_socket_path.length()) + cfg.add_options() + (my->unix_socket_path_option_name.c_str(), bpo::value()->default_value(current_http_plugin_defaults.default_unix_socket_path), + "The filename (relative to data-dir) to create a unix socket for HTTP RPC; set blank to disable."); + + if(current_http_plugin_defaults.default_http_port) + cfg.add_options() + (my->http_server_address_option_name.c_str(), bpo::value()->default_value("127.0.0.1:" + std::to_string(current_http_plugin_defaults.default_http_port)), + "The local IP and port to listen for incoming http connections; set blank to disable."); + else + cfg.add_options() + (my->http_server_address_option_name.c_str(), bpo::value(), + "The local IP and port to listen for incoming http connections; leave blank to disable."); - ("https-server-address", bpo::value(), + cfg.add_options() + (my->https_server_address_option_name.c_str(), bpo::value(), "The local IP and port to listen for incoming https connections; leave blank to disable.") ("https-certificate-chain-file", bpo::value(), @@ -334,8 +412,8 @@ namespace eosio { } tcp::resolver resolver( app().get_io_service()); - if( options.count( "http-server-address" ) && options.at( "http-server-address" ).as().length()) { - string lipstr = options.at( "http-server-address" ).as(); + if( options.count( my->http_server_address_option_name ) && options.at( my->http_server_address_option_name ).as().length()) { + string lipstr = options.at( my->http_server_address_option_name ).as(); string host = lipstr.substr( 0, lipstr.find( ':' )); string port = lipstr.substr( host.size() + 1, lipstr.size()); tcp::resolver::query query( tcp::v4(), host.c_str(), port.c_str()); @@ -353,7 +431,14 @@ namespace eosio { } } - if( options.count( "https-server-address" ) && options.at( "https-server-address" ).as().length()) { + if( options.count( my->unix_socket_path_option_name ) && !options.at( my->unix_socket_path_option_name ).as().empty()) { + boost::filesystem::path sock_path = options.at(my->unix_socket_path_option_name).as(); + if (sock_path.is_relative()) + sock_path = app().data_dir() / sock_path; + my->unix_endpoint = asio::local::stream_protocol::endpoint(sock_path.string()); + } + + if( options.count( my->https_server_address_option_name ) && options.at( my->https_server_address_option_name ).as().length()) { if( !options.count( "https-certificate-chain-file" ) || options.at( "https-certificate-chain-file" ).as().empty()) { elog( "https-certificate-chain-file is required for HTTPS" ); @@ -365,7 +450,7 @@ namespace eosio { return; } - string lipstr = options.at( "https-server-address" ).as(); + string lipstr = options.at( my->https_server_address_option_name ).as(); string host = lipstr.substr( 0, lipstr.find( ':' )); string port = lipstr.substr( host.size() + 1, lipstr.size()); tcp::resolver::query query( tcp::v4(), host.c_str(), port.c_str()); @@ -413,6 +498,28 @@ namespace eosio { } } + if(my->unix_endpoint) { + try { + my->unix_server.clear_access_channels(websocketpp::log::alevel::all); + my->unix_server.init_asio(&app().get_io_service()); + 
my->unix_server.set_max_http_body_size(my->max_body_size); + my->unix_server.listen(*my->unix_endpoint); + my->unix_server.set_http_handler([&](connection_hdl hdl) { + my->handle_http_request( my->unix_server.get_con_from_hdl(hdl)); + }); + my->unix_server.start_accept(); + } catch ( const fc::exception& e ){ + elog( "unix socket service failed to start: ${e}", ("e",e.to_detail_string())); + throw; + } catch ( const std::exception& e ){ + elog( "unix socket service failed to start: ${e}", ("e",e.what())); + throw; + } catch (...) { + elog("error thrown from unix socket io service"); + throw; + } + } + if(my->https_listen_endpoint) { try { my->create_server_for_endpoint(*my->https_listen_endpoint, my->https_server); diff --git a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp index b70768c9671..e78300c6240 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp @@ -40,6 +40,19 @@ namespace eosio { */ using api_description = std::map; + struct http_plugin_defaults { + //If not empty, this string is prepended on to the various configuration + // items for setting listen addresses + string address_config_prefix; + //If empty, unix socket support will be completely disabled. If not empty, + // unix socket support is enabled with the given default path (treated relative + // to the datadir) + string default_unix_socket_path; + //If non 0, HTTP will be enabled by default on the given port number. If + // 0, HTTP will not be enabled by default + uint16_t default_http_port{0}; + }; + /** * This plugin starts an HTTP server and dispatches queries to * registered handles based upon URL. The handler is passed the @@ -60,6 +73,9 @@ namespace eosio { http_plugin(); virtual ~http_plugin(); + //must be called before initialize + static void set_defaults(const http_plugin_defaults config); + APPBASE_PLUGIN_REQUIRES() virtual void set_program_options(options_description&, options_description& cfg) override; diff --git a/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp b/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp new file mode 100644 index 00000000000..4664d1d378a --- /dev/null +++ b/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp @@ -0,0 +1,788 @@ +#pragma once + +#include +#include +#include + +#include +#include + +#include + +#include +#include + +namespace websocketpp { +namespace transport { +namespace asio { + +namespace basic_socket { + +class local_connection : public lib::enable_shared_from_this { +public: + /// Type of this connection socket component + typedef local_connection type; + /// Type of a shared pointer to this connection socket component + typedef lib::shared_ptr ptr; + + /// Type of a pointer to the Asio io_service being used + typedef lib::asio::io_service* io_service_ptr; + /// Type of a pointer to the Asio io_service strand being used + typedef lib::shared_ptr strand_ptr; + /// Type of the ASIO socket being used + typedef lib::asio::local::stream_protocol::socket socket_type; + /// Type of a shared pointer to the socket being used. 
+ typedef lib::shared_ptr socket_ptr; + + explicit local_connection() : m_state(UNINITIALIZED) { + } + + ptr get_shared() { + return shared_from_this(); + } + + bool is_secure() const { + return false; + } + + lib::asio::local::stream_protocol::socket & get_socket() { + return *m_socket; + } + + lib::asio::local::stream_protocol::socket & get_next_layer() { + return *m_socket; + } + + lib::asio::local::stream_protocol::socket & get_raw_socket() { + return *m_socket; + } + + std::string get_remote_endpoint(lib::error_code & ec) const { + return "UNIX Socket Endpoint"; + } + + void pre_init(init_handler callback) { + if (m_state != READY) { + callback(socket::make_error_code(socket::error::invalid_state)); + return; + } + + m_state = READING; + + callback(lib::error_code()); + } + + void post_init(init_handler callback) { + callback(lib::error_code()); + } +protected: + lib::error_code init_asio (io_service_ptr service, strand_ptr, bool) + { + if (m_state != UNINITIALIZED) { + return socket::make_error_code(socket::error::invalid_state); + } + + m_socket = lib::make_shared( + lib::ref(*service)); + + m_state = READY; + + return lib::error_code(); + } + + void set_handle(connection_hdl hdl) { + m_hdl = hdl; + } + + lib::asio::error_code cancel_socket() { + lib::asio::error_code ec; + m_socket->cancel(ec); + return ec; + } + + void async_shutdown(socket::shutdown_handler h) { + lib::asio::error_code ec; + m_socket->shutdown(lib::asio::ip::tcp::socket::shutdown_both, ec); + h(ec); + } + + lib::error_code get_ec() const { + return lib::error_code(); + } + + template + lib::error_code translate_ec(ErrorCodeType) { + return make_error_code(transport::error::pass_through); + } + + lib::error_code translate_ec(lib::error_code ec) { + return ec; + } +private: + enum state { + UNINITIALIZED = 0, + READY = 1, + READING = 2 + }; + + socket_ptr m_socket; + state m_state; + + connection_hdl m_hdl; + socket_init_handler m_socket_init_handler; +}; + +class local_endpoint { +public: + /// The type of this endpoint socket component + typedef local_endpoint type; + + /// The type of the corresponding connection socket component + typedef local_connection socket_con_type; + /// The type of a shared pointer to the corresponding connection socket + /// component. + typedef socket_con_type::ptr socket_con_ptr; + + explicit local_endpoint() {} + + bool is_secure() const { + return false; + } +}; +} + +/// Asio based endpoint transport component +/** + * transport::asio::endpoint implements an endpoint transport component using + * Asio. 
+ */ +template +class local_endpoint : public config::socket_type { +public: + /// Type of this endpoint transport component + typedef local_endpoint type; + + /// Type of the concurrency policy + typedef typename config::concurrency_type concurrency_type; + /// Type of the socket policy + typedef typename config::socket_type socket_type; + /// Type of the error logging policy + typedef typename config::elog_type elog_type; + /// Type of the access logging policy + typedef typename config::alog_type alog_type; + + /// Type of the socket connection component + typedef typename socket_type::socket_con_type socket_con_type; + /// Type of a shared pointer to the socket connection component + typedef typename socket_con_type::ptr socket_con_ptr; + + /// Type of the connection transport component associated with this + /// endpoint transport component + typedef asio::connection transport_con_type; + /// Type of a shared pointer to the connection transport component + /// associated with this endpoint transport component + typedef typename transport_con_type::ptr transport_con_ptr; + + /// Type of a pointer to the ASIO io_service being used + typedef lib::asio::io_service * io_service_ptr; + /// Type of a shared pointer to the acceptor being used + typedef lib::shared_ptr acceptor_ptr; + /// Type of timer handle + typedef lib::shared_ptr timer_ptr; + /// Type of a shared pointer to an io_service work object + typedef lib::shared_ptr work_ptr; + + // generate and manage our own io_service + explicit local_endpoint() + : m_io_service(NULL) + , m_state(UNINITIALIZED) + { + //std::cout << "transport::asio::endpoint constructor" << std::endl; + } + + ~local_endpoint() { + if (m_acceptor && m_state == LISTENING) + ::unlink(m_acceptor->local_endpoint().path().c_str()); + + // Explicitly destroy local objects + m_acceptor.reset(); + m_work.reset(); + } + + /// transport::asio objects are moveable but not copyable or assignable. + /// The following code sets this situation up based on whether or not we + /// have C++11 support or not +#ifdef _WEBSOCKETPP_DEFAULT_DELETE_FUNCTIONS_ + local_endpoint(const local_endpoint & src) = delete; + local_endpoint& operator= (const local_endpoint & rhs) = delete; +#else +private: + local_endpoint(const local_endpoint & src); + local_endpoint & operator= (const local_endpoint & rhs); +public: +#endif // _WEBSOCKETPP_DEFAULT_DELETE_FUNCTIONS_ + +#ifdef _WEBSOCKETPP_MOVE_SEMANTICS_ + local_endpoint (local_endpoint && src) + : config::socket_type(std::move(src)) + , m_tcp_pre_init_handler(src.m_tcp_pre_init_handler) + , m_tcp_post_init_handler(src.m_tcp_post_init_handler) + , m_io_service(src.m_io_service) + , m_acceptor(src.m_acceptor) + , m_elog(src.m_elog) + , m_alog(src.m_alog) + , m_state(src.m_state) + { + src.m_io_service = NULL; + src.m_acceptor = NULL; + src.m_state = UNINITIALIZED; + } + +#endif // _WEBSOCKETPP_MOVE_SEMANTICS_ + + /// Return whether or not the endpoint produces secure connections. + bool is_secure() const { + return socket_type::is_secure(); + } + + /// initialize asio transport with external io_service (exception free) + /** + * Initialize the ASIO transport policy for this endpoint using the provided + * io_service object. asio_init must be called exactly once on any endpoint + * that uses transport::asio before it can be used. + * + * @param ptr A pointer to the io_service to use for asio events + * @param ec Set to indicate what error occurred, if any. 
+ */ + void init_asio(io_service_ptr ptr, lib::error_code & ec) { + if (m_state != UNINITIALIZED) { + m_elog->write(log::elevel::library, + "asio::init_asio called from the wrong state"); + using websocketpp::error::make_error_code; + ec = make_error_code(websocketpp::error::invalid_state); + return; + } + + m_alog->write(log::alevel::devel,"asio::init_asio"); + + m_io_service = ptr; + m_acceptor = lib::make_shared( + lib::ref(*m_io_service)); + + m_state = READY; + ec = lib::error_code(); + } + + /// initialize asio transport with external io_service + /** + * Initialize the ASIO transport policy for this endpoint using the provided + * io_service object. asio_init must be called exactly once on any endpoint + * that uses transport::asio before it can be used. + * + * @param ptr A pointer to the io_service to use for asio events + */ + void init_asio(io_service_ptr ptr) { + lib::error_code ec; + init_asio(ptr,ec); + if (ec) { throw exception(ec); } + } + + /// Sets the tcp pre init handler + /** + * The tcp pre init handler is called after the raw tcp connection has been + * established but before any additional wrappers (proxy connects, TLS + * handshakes, etc) have been performed. + * + * @since 0.3.0 + * + * @param h The handler to call on tcp pre init. + */ + void set_tcp_pre_init_handler(tcp_init_handler h) { + m_tcp_pre_init_handler = h; + } + + /// Sets the tcp pre init handler (deprecated) + /** + * The tcp pre init handler is called after the raw tcp connection has been + * established but before any additional wrappers (proxy connects, TLS + * handshakes, etc) have been performed. + * + * @deprecated Use set_tcp_pre_init_handler instead + * + * @param h The handler to call on tcp pre init. + */ + void set_tcp_init_handler(tcp_init_handler h) { + set_tcp_pre_init_handler(h); + } + + /// Sets the tcp post init handler + /** + * The tcp post init handler is called after the tcp connection has been + * established and all additional wrappers (proxy connects, TLS handshakes, + * etc have been performed. This is fired before any bytes are read or any + * WebSocket specific handshake logic has been performed. + * + * @since 0.3.0 + * + * @param h The handler to call on tcp post init. + */ + void set_tcp_post_init_handler(tcp_init_handler h) { + m_tcp_post_init_handler = h; + } + + /// Retrieve a reference to the endpoint's io_service + /** + * The io_service may be an internal or external one. This may be used to + * call methods of the io_service that are not explicitly wrapped by the + * endpoint. + * + * This method is only valid after the endpoint has been initialized with + * `init_asio`. No error will be returned if it isn't. + * + * @return A reference to the endpoint's io_service + */ + lib::asio::io_service & get_io_service() { + return *m_io_service; + } + + /// Set up endpoint for listening manually (exception free) + /** + * Bind the internal acceptor using the specified settings. The endpoint + * must have been initialized by calling init_asio before listening. + * + * @param ep An endpoint to read settings from + * @param ec Set to indicate what error occurred, if any. 
+ */ + void listen(lib::asio::local::stream_protocol::endpoint const & ep, lib::error_code & ec) + { + if (m_state != READY) { + m_elog->write(log::elevel::library, + "asio::listen called from the wrong state"); + using websocketpp::error::make_error_code; + ec = make_error_code(websocketpp::error::invalid_state); + return; + } + + m_alog->write(log::alevel::devel,"asio::listen"); + + lib::asio::error_code bec; + + { + boost::system::error_code test_ec; + lib::asio::local::stream_protocol::socket test_socket(get_io_service()); + test_socket.connect(ep, test_ec); + + //looks like a service is already running on that socket, probably another keosd, don't touch it + if(test_ec == boost::system::errc::success) + bec = boost::system::errc::make_error_code(boost::system::errc::address_in_use); + //socket exists but no one home, go ahead and remove it and continue on + else if(test_ec == boost::system::errc::connection_refused) + ::unlink(ep.path().c_str()); + else if(test_ec != boost::system::errc::no_such_file_or_directory) + bec = test_ec; + } + + if (!bec) { + m_acceptor->open(ep.protocol(),bec); + } + if (!bec) { + m_acceptor->bind(ep,bec); + } + if (!bec) { + m_acceptor->listen(boost::asio::socket_base::max_listen_connections,bec); + } + if (bec) { + if (m_acceptor->is_open()) { + m_acceptor->close(); + } + log_err(log::elevel::info,"asio listen",bec); + ec = bec; + } else { + m_state = LISTENING; + ec = lib::error_code(); + } + } + + /// Set up endpoint for listening manually + /** + * Bind the internal acceptor using the settings specified by the endpoint e + * + * @param ep An endpoint to read settings from + */ + void listen(lib::asio::local::stream_protocol::endpoint const & ep) { + lib::error_code ec; + listen(ep,ec); + if (ec) { throw exception(ec); } + } + + /// Stop listening (exception free) + /** + * Stop listening and accepting new connections. This will not end any + * existing connections. + * + * @since 0.3.0-alpha4 + * @param ec A status code indicating an error, if any. + */ + void stop_listening(lib::error_code & ec) { + if (m_state != LISTENING) { + m_elog->write(log::elevel::library, + "asio::listen called from the wrong state"); + using websocketpp::error::make_error_code; + ec = make_error_code(websocketpp::error::invalid_state); + return; + } + + ::unlink(m_acceptor->local_endpoint().path().c_str()); + m_acceptor->close(); + m_state = READY; + ec = lib::error_code(); + } + + /// Stop listening + /** + * Stop listening and accepting new connections. This will not end any + * existing connections. + * + * @since 0.3.0-alpha4 + */ + void stop_listening() { + lib::error_code ec; + stop_listening(ec); + if (ec) { throw exception(ec); } + } + + /// Check if the endpoint is listening + /** + * @return Whether or not the endpoint is listening. 
+ */ + bool is_listening() const { + return (m_state == LISTENING); + } + + /// wraps the run method of the internal io_service object + std::size_t run() { + return m_io_service->run(); + } + + /// wraps the run_one method of the internal io_service object + /** + * @since 0.3.0-alpha4 + */ + std::size_t run_one() { + return m_io_service->run_one(); + } + + /// wraps the stop method of the internal io_service object + void stop() { + m_io_service->stop(); + } + + /// wraps the poll method of the internal io_service object + std::size_t poll() { + return m_io_service->poll(); + } + + /// wraps the poll_one method of the internal io_service object + std::size_t poll_one() { + return m_io_service->poll_one(); + } + + /// wraps the reset method of the internal io_service object + void reset() { + m_io_service->reset(); + } + + /// wraps the stopped method of the internal io_service object + bool stopped() const { + return m_io_service->stopped(); + } + + /// Marks the endpoint as perpetual, stopping it from exiting when empty + /** + * Marks the endpoint as perpetual. Perpetual endpoints will not + * automatically exit when they run out of connections to process. To stop + * a perpetual endpoint call `end_perpetual`. + * + * An endpoint may be marked perpetual at any time by any thread. It must be + * called either before the endpoint has run out of work or before it was + * started + * + * @since 0.3.0 + */ + void start_perpetual() { + m_work = lib::make_shared( + lib::ref(*m_io_service) + ); + } + + /// Clears the endpoint's perpetual flag, allowing it to exit when empty + /** + * Clears the endpoint's perpetual flag. This will cause the endpoint's run + * method to exit normally when it runs out of connections. If there are + * currently active connections it will not end until they are complete. + * + * @since 0.3.0 + */ + void stop_perpetual() { + m_work.reset(); + } + + /// Call back a function after a period of time. + /** + * Sets a timer that calls back a function after the specified period of + * milliseconds. Returns a handle that can be used to cancel the timer. + * A cancelled timer will return the error code error::operation_aborted + * A timer that expired will return no error. + * + * @param duration Length of time to wait in milliseconds + * @param callback The function to call back when the timer has expired + * @return A handle that can be used to cancel the timer if it is no longer + * needed. + */ + timer_ptr set_timer(long duration, timer_handler callback) { + timer_ptr new_timer = lib::make_shared( + *m_io_service, + lib::asio::milliseconds(duration) + ); + + new_timer->async_wait( + lib::bind( + &type::handle_timer, + this, + new_timer, + callback, + lib::placeholders::_1 + ) + ); + + return new_timer; + } + + /// Timer handler + /** + * The timer pointer is included to ensure the timer isn't destroyed until + * after it has expired. + * + * @param t Pointer to the timer in question + * @param callback The function to call back + * @param ec A status code indicating an error, if any. 
+ */ + void handle_timer(timer_ptr, timer_handler callback, + lib::asio::error_code const & ec) + { + if (ec) { + if (ec == lib::asio::error::operation_aborted) { + callback(make_error_code(transport::error::operation_aborted)); + } else { + m_elog->write(log::elevel::info, + "asio handle_timer error: "+ec.message()); + log_err(log::elevel::info,"asio handle_timer",ec); + callback(ec); + } + } else { + callback(lib::error_code()); + } + } + + /// Accept the next connection attempt and assign it to con (exception free) + /** + * @param tcon The connection to accept into. + * @param callback The function to call when the operation is complete. + * @param ec A status code indicating an error, if any. + */ + void async_accept(transport_con_ptr tcon, accept_handler callback, + lib::error_code & ec) + { + if (m_state != LISTENING) { + using websocketpp::error::make_error_code; + ec = make_error_code(websocketpp::error::async_accept_not_listening); + return; + } + + m_alog->write(log::alevel::devel, "asio::async_accept"); + + if (config::enable_multithreading) { + m_acceptor->async_accept( + tcon->get_raw_socket(), + tcon->get_strand()->wrap(lib::bind( + &type::handle_accept, + this, + callback, + lib::placeholders::_1 + )) + ); + } else { + m_acceptor->async_accept( + tcon->get_raw_socket(), + lib::bind( + &type::handle_accept, + this, + callback, + lib::placeholders::_1 + ) + ); + } + } + + /// Accept the next connection attempt and assign it to con. + /** + * @param tcon The connection to accept into. + * @param callback The function to call when the operation is complete. + */ + void async_accept(transport_con_ptr tcon, accept_handler callback) { + lib::error_code ec; + async_accept(tcon,callback,ec); + if (ec) { throw exception(ec); } + } +protected: + /// Initialize logging + /** + * The loggers are located in the main endpoint class. As such, the + * transport doesn't have direct access to them. This method is called + * by the endpoint constructor to allow shared logging from the transport + * component. These are raw pointers to member variables of the endpoint. + * In particular, they cannot be used in the transport constructor as they + * haven't been constructed yet, and cannot be used in the transport + * destructor as they will have been destroyed by then. + */ + void init_logging(alog_type* a, elog_type* e) { + m_alog = a; + m_elog = e; + } + + void handle_accept(accept_handler callback, lib::asio::error_code const & + asio_ec) + { + lib::error_code ret_ec; + + m_alog->write(log::alevel::devel, "asio::handle_accept"); + + if (asio_ec) { + if (asio_ec == lib::asio::errc::operation_canceled) { + ret_ec = make_error_code(websocketpp::error::operation_canceled); + } else { + log_err(log::elevel::info,"asio handle_accept",asio_ec); + ret_ec = asio_ec; + } + } + + callback(ret_ec); + } + + /// Asio connect timeout handler + /** + * The timer pointer is included to ensure the timer isn't destroyed until + * after it has expired. + * + * @param tcon Pointer to the transport connection that is being connected + * @param con_timer Pointer to the timer in question + * @param callback The function to call back + * @param ec A status code indicating an error, if any. 
+     */
+    void handle_connect_timeout(transport_con_ptr tcon, timer_ptr,
+        connect_handler callback, lib::error_code const & ec)
+    {
+        lib::error_code ret_ec;
+
+        if (ec) {
+            if (ec == transport::error::operation_aborted) {
+                m_alog->write(log::alevel::devel,
+                    "asio handle_connect_timeout timer cancelled");
+                return;
+            }
+
+            log_err(log::elevel::devel,"asio handle_connect_timeout",ec);
+            ret_ec = ec;
+        } else {
+            ret_ec = make_error_code(transport::error::timeout);
+        }
+
+        m_alog->write(log::alevel::devel,"TCP connect timed out");
+        tcon->cancel_socket_checked();
+        callback(ret_ec);
+    }
+
+    void handle_connect(transport_con_ptr tcon, timer_ptr con_timer,
+        connect_handler callback, lib::asio::error_code const & ec)
+    {
+        if (ec == lib::asio::error::operation_aborted ||
+            lib::asio::is_neg(con_timer->expires_from_now()))
+        {
+            m_alog->write(log::alevel::devel,"async_connect cancelled");
+            return;
+        }
+
+        con_timer->cancel();
+
+        if (ec) {
+            log_err(log::elevel::info,"asio async_connect",ec);
+            callback(ec);
+            return;
+        }
+
+        if (m_alog->static_test(log::alevel::devel)) {
+            m_alog->write(log::alevel::devel,
+                "Async connect to "+tcon->get_remote_endpoint()+" successful.");
+        }
+
+        callback(lib::error_code());
+    }
+
+    /// Initialize a connection
+    /**
+     * init is called by an endpoint once for each newly created connection.
+     * Its purpose is to give the transport policy the chance to perform any
+     * transport specific initialization that couldn't be done via the default
+     * constructor.
+     *
+     * @param tcon A pointer to the transport portion of the connection.
+     *
+     * @return A status code indicating the success or failure of the operation
+     */
+    lib::error_code init(transport_con_ptr tcon) {
+        m_alog->write(log::alevel::devel, "transport::asio::init");
+
+        lib::error_code ec;
+
+        ec = tcon->init_asio(m_io_service);
+        if (ec) {return ec;}
+
+        tcon->set_tcp_pre_init_handler(m_tcp_pre_init_handler);
+        tcon->set_tcp_post_init_handler(m_tcp_post_init_handler);
+
+        return lib::error_code();
+    }
+private:
+    /// Convenience method for logging the code and message for an error_code
+    template <typename error_type>
+    void log_err(log::level l, char const * msg, error_type const & ec) {
+        std::stringstream s;
+        s << msg << " error: " << ec << " (" << ec.message() << ")";
+        m_elog->write(l,s.str());
+    }
+
+    enum state {
+        UNINITIALIZED = 0,
+        READY = 1,
+        LISTENING = 2
+    };
+
+    // Handlers
+    tcp_init_handler    m_tcp_pre_init_handler;
+    tcp_init_handler    m_tcp_post_init_handler;
+
+    // Network Resources
+    io_service_ptr      m_io_service;
+    acceptor_ptr        m_acceptor;
+    work_ptr            m_work;
+
+    elog_type* m_elog;
+    alog_type* m_alog;
+
+    // Transport state
+    state               m_state;
+};
+
+} // namespace asio
+} // namespace transport
+} // namespace websocketpp
diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp
index c4d11e1bdee..f0e27401dfe 100644
--- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp
+++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp
@@ -29,6 +29,7 @@
 #include
 #include
+#include <mongocxx/pool.hpp>
 #include
 #include
 
@@ -52,11 +53,16 @@ struct filter_entry {
    name receiver;
    name action;
    name actor;
-   std::tuple<name, name, name> key() const {
-      return std::make_tuple(receiver, action, actor);
-   }
+
    friend bool operator<( const filter_entry& a, const filter_entry& b ) {
-      return a.key() < b.key();
+      return std::tie( a.receiver, a.action, a.actor ) < std::tie( b.receiver, b.action, b.actor );
+   }
+
+   //            receiver          action       actor
+   bool match( const name& rr, const name& an, const name& ar ) const {
+      return (receiver.value == 0 || receiver == rr) &&
+             (action.value == 0 || action == an) &&
+             (actor.value == 0 || actor == ar);
+   }
 };
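A value of 0 in any filter_entry field acts as a wildcard, which is what lets a configured filter such as eosio.token:transfer: match every actor. A small illustration of the matching behavior (account and action names are examples only; assumes eosio::chain::name and the N() macro are in scope):

    filter_entry f{ N(eosio.token), N(transfer), 0 };  // actor 0 == any actor

    f.match( N(eosio.token), N(transfer), N(alice) );  // true:  actor is wildcarded
    f.match( N(eosio.token), N(transfer), N(bob)   );  // true
    f.match( N(eosio.token), N(issue),    N(alice) );  // false: action differs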
@@ -91,6 +97,7 @@ class mongo_db_plugin_impl {
       void purge_abi_cache();
 
       bool add_action_trace( mongocxx::bulk_write& bulk_action_traces, const chain::action_trace& atrace,
+                             const chain::transaction_trace_ptr& t,
                              bool executed, const std::chrono::milliseconds& now );
 
       void update_account(const chain::action& act);
@@ -116,6 +123,7 @@ class mongo_db_plugin_impl {
       uint32_t start_block_num = 0;
       std::atomic_bool start_block_reached{false};
 
+      bool is_producer = false;
       bool filter_on_star = true;
       std::set<filter_entry> filter_on;
       std::set<filter_entry> filter_out;
@@ -127,8 +135,17 @@ class mongo_db_plugin_impl {
 
       std::string db_name;
       mongocxx::instance mongo_inst;
-      mongocxx::client mongo_conn;
-      mongocxx::collection accounts;
+      fc::optional<mongocxx::pool> mongo_pool;
+
+      // consumer thread
+      mongocxx::collection _accounts;
+      mongocxx::collection _trans;
+      mongocxx::collection _trans_traces;
+      mongocxx::collection _action_traces;
+      mongocxx::collection _block_states;
+      mongocxx::collection _blocks;
+      mongocxx::collection _pub_keys;
+      mongocxx::collection _account_controls;
 
       size_t max_queue_size = 0;
       int queue_sleep_time = 0;
@@ -202,30 +219,41 @@ const std::string mongo_db_plugin_impl::account_controls_col = "account_controls
 
 bool mongo_db_plugin_impl::filter_include( const chain::action_trace& action_trace ) const {
    bool include = false;
-   if( filter_on_star || filter_on.find( {action_trace.receipt.receiver, action_trace.act.name, 0} ) != filter_on.end() ) {
+   if( filter_on_star ) {
       include = true;
    } else {
-      for( const auto& a : action_trace.act.authorization ) {
-         if( filter_on.find( {action_trace.receipt.receiver, action_trace.act.name, a.actor} ) != filter_on.end() ) {
-            include = true;
-            break;
+      auto itr = std::find_if( filter_on.cbegin(), filter_on.cend(), [&action_trace]( const auto& filter ) {
+         return filter.match( action_trace.receipt.receiver, action_trace.act.name, 0 );
+      } );
+      if( itr != filter_on.cend() ) {
+         include = true;
+      } else {
+         for( const auto& a : action_trace.act.authorization ) {
+            auto itr = std::find_if( filter_on.cbegin(), filter_on.cend(), [&action_trace, &a]( const auto& filter ) {
+               return filter.match( action_trace.receipt.receiver, action_trace.act.name, a.actor );
+            } );
+            if( itr != filter_on.cend() ) {
+               include = true;
+               break;
+            }
          }
      }
   }
 
   if( !include ) { return false; }
 
-  if( filter_out.find( {action_trace.receipt.receiver, 0, 0} ) != filter_out.end() ) {
-     return false;
-  }
-  if( filter_out.find( {action_trace.receipt.receiver, action_trace.act.name, 0} ) != filter_out.end() ) {
-     return false;
-  }
+  auto itr = std::find_if( filter_out.cbegin(), filter_out.cend(), [&action_trace]( const auto& filter ) {
+     return filter.match( action_trace.receipt.receiver, action_trace.act.name, 0 );
+  } );
+  if( itr != filter_out.cend() ) { return false; }
+
   for( const auto& a : action_trace.act.authorization ) {
-     if( filter_out.find( {action_trace.receipt.receiver, action_trace.act.name, a.actor} ) != filter_out.end() ) {
-        return false;
-     }
+     auto itr = std::find_if( filter_out.cbegin(), filter_out.cend(), [&action_trace, &a]( const auto& filter ) {
+        return filter.match( action_trace.receipt.receiver, action_trace.act.name, a.actor );
+     } );
+     if( itr != filter_out.cend() ) { return false; }
   }
+
   return true;
 }
 
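Replacing the shared mongocxx::client with a pool means each thread acquires its own client and caches per-collection handles, which is exactly what consume_blocks() does below. A minimal standalone sketch of that pattern with the mongocxx driver (the URI and database names are illustrative):

    #include <mongocxx/instance.hpp>
    #include <mongocxx/pool.hpp>
    #include <mongocxx/uri.hpp>

    int main() {
       mongocxx::instance inst{};  // exactly one per process
       mongocxx::pool pool{ mongocxx::uri{ "mongodb://127.0.0.1:27017" } };

       auto client   = pool.acquire();                 // per-thread entry
       auto accounts = (*client)["eosdb"]["accounts"]; // cached collection handle
       // use `accounts` only on this thread; the client returns to the pool
       // when `client` goes out of scope
    }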
@@ -266,6 +294,28 @@ void mongo_db_plugin_impl::accepted_transaction( const chain::transaction_metada
 
 void mongo_db_plugin_impl::applied_transaction( const chain::transaction_trace_ptr& t ) {
    try {
+      // Traces emitted from an incomplete block leave the producer_block_id as empty.
+      //
+      // Avoid adding the action traces or transaction traces to the database if the producer_block_id is empty.
+      // This way traces from speculatively executed transactions are not included in the Mongo database, which
+      // avoids potential confusion for consumers of that database.
+      //
+      // Due to forks, it could be possible for multiple incompatible action traces with the same block_num and trx_id
+      // to exist in the database. And if the producer double produces a block, even the block_time may not
+      // disambiguate the two action traces. Without a producer_block_id to disambiguate and determine if the action
+      // trace comes from an orphaned fork branching off of the blockchain, consumers of the MongoDB database may be
+      // reacting to a stale action trace that never actually executed in the current blockchain.
+      //
+      // It is better to avoid this potential confusion by not logging traces from speculative execution, i.e. traces
+      // emitted from an incomplete block. This means that traces will not be recorded in speculative read-mode, but
+      // users should not be using the mongo_db_plugin in that mode anyway.
+      //
+      // Allow logging traces if the node is a producer, so a single nodeos can do both for testing purposes.
+      //
+      // It is recommended to run mongo_db_plugin with read-mode = read-only.
+      //
+      if( !is_producer && !t->producer_block_id.valid() )
+         return;
+
       // always queue since account information always gathered
       queue( transaction_trace_queue, t );
    } catch (fc::exception& e) {
@@ -279,7 +329,7 @@ void mongo_db_plugin_impl::applied_transaction( const chain::transaction_trace_p
 
 void mongo_db_plugin_impl::applied_irreversible_block( const chain::block_state_ptr& bs ) {
    try {
-      if( store_blocks || store_transactions ) {
+      if( store_blocks || store_block_states || store_transactions ) {
         queue( irreversible_block_state_queue, bs );
      }
    } catch (fc::exception& e) {
@@ -312,6 +362,18 @@ void mongo_db_plugin_impl::accepted_block( const chain::block_state_ptr& bs ) {
 
 void mongo_db_plugin_impl::consume_blocks() {
    try {
+      auto mongo_client = mongo_pool->acquire();
+      auto& mongo_conn = *mongo_client;
+
+      _accounts = mongo_conn[db_name][accounts_col];
+      _trans = mongo_conn[db_name][trans_col];
+      _trans_traces = mongo_conn[db_name][trans_traces_col];
+      _action_traces = mongo_conn[db_name][action_traces_col];
+      _blocks = mongo_conn[db_name][blocks_col];
+      _block_states = mongo_conn[db_name][block_states_col];
+      _pub_keys = mongo_conn[db_name][pub_keys_col];
+      _account_controls = mongo_conn[db_name][account_controls_col];
+
       while (true) {
         boost::mutex::scoped_lock lock(mtx);
         while ( transaction_metadata_queue.empty() &&
@@ -505,7 +567,7 @@ optional<abi_serializer> mongo_db_plugin_impl::get_abi_serializer( account_name
       return itr->serializer;
    }
 
-   auto account = accounts.find_one( make_document( kvp("name", n.to_string())) );
+   auto account = _accounts.find_one( make_document( kvp("name", n.to_string())) );
    if(account) {
      auto view = account->view();
      abi_def abi;
@@ -632,7 +694,6 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti
    using bsoncxx::builder::basic::make_array;
    namespace bbb = bsoncxx::builder::basic;
 
-   auto trans = mongo_conn[db_name][trans_col];
    auto trans_doc = bsoncxx::builder::basic::document{};
 
    auto now = std::chrono::duration_cast<std::chrono::milliseconds>(
      std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()});
@@ -692,8 +753,8 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti
 
    try {
      mongocxx::options::update update_opts{};
update_opts.upsert( true ); - if( !trans.update_one( make_document( kvp( "trx_id", trx_id_str ) ), - make_document( kvp( "$set", trans_doc.view() ) ), update_opts ) ) { + if( !_trans.update_one( make_document( kvp( "trx_id", trx_id_str ) ), + make_document( kvp( "$set", trans_doc.view() ) ), update_opts ) ) { EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert trans ${id}", ("id", trx_id) ); } } catch( ... ) { @@ -704,6 +765,7 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti bool mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& bulk_action_traces, const chain::action_trace& atrace, + const chain::transaction_trace_ptr& t, bool executed, const std::chrono::milliseconds& now ) { using namespace bsoncxx::types; @@ -734,6 +796,9 @@ mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& bulk_action_traces elog( " JSON: ${j}", ("j", json) ); } } + if( t->receipt.valid() ) { + action_traces_doc.append( kvp( "trx_status", std::string( t->receipt->status ) ) ); + } action_traces_doc.append( kvp( "createdAt", b_date{now} ) ); mongocxx::model::insert_one insert_op{action_traces_doc.view()}; @@ -742,7 +807,7 @@ mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& bulk_action_traces } for( const auto& iline_atrace : atrace.inline_traces ) { - added |= add_action_trace( bulk_action_traces, iline_atrace, executed, now ); + added |= add_action_trace( bulk_action_traces, iline_atrace, t, executed, now ); } return added; @@ -753,8 +818,6 @@ void mongo_db_plugin_impl::_process_applied_transaction( const chain::transactio using namespace bsoncxx::types; using bsoncxx::builder::basic::kvp; - auto trans_traces = mongo_conn[db_name][trans_traces_col]; - auto action_traces = mongo_conn[db_name][action_traces_col]; auto trans_traces_doc = bsoncxx::builder::basic::document{}; auto now = std::chrono::duration_cast( @@ -762,58 +825,65 @@ void mongo_db_plugin_impl::_process_applied_transaction( const chain::transactio mongocxx::options::bulk_write bulk_opts; bulk_opts.ordered(false); - mongocxx::bulk_write bulk_action_traces = action_traces.create_bulk_write(bulk_opts); + mongocxx::bulk_write bulk_action_traces = _action_traces.create_bulk_write(bulk_opts); bool write_atraces = false; bool executed = t->receipt.valid() && t->receipt->status == chain::transaction_receipt_header::executed; for( const auto& atrace : t->action_traces ) { try { - write_atraces |= add_action_trace( bulk_action_traces, atrace, executed, now ); + write_atraces |= add_action_trace( bulk_action_traces, atrace, t, executed, now ); } catch(...) { handle_mongo_exception("add action traces", __LINE__); } } - if( write_atraces ) { - try { - if( !bulk_action_traces.execute() ) { - EOS_ASSERT( false, chain::mongo_db_insert_fail, "Bulk action traces insert failed for transaction trace: ${id}", ("id", t->id)); - } - } catch(...) 
 {
-         handle_mongo_exception("action traces insert", __LINE__);
-      }
-   }
-
-   if( !start_block_reached || !store_transaction_traces ) return;
+   if( !start_block_reached ) return; //< add_action_trace calls update_account which must be called always
+   if( !write_atraces ) return; //< do not insert transaction_trace if all action_traces filtered out
 
    // transaction trace insert
-   auto v = to_variant_with_abi( *t );
-   string json = fc::json::to_string( v );
-   try {
-      const auto& value = bsoncxx::from_json( json );
-      trans_traces_doc.append( bsoncxx::builder::concatenate_doc{value.view()} );
-   } catch( bsoncxx::exception& ) {
+   if( store_transaction_traces ) {
       try {
-         json = fc::prune_invalid_utf8( json );
-         const auto& value = bsoncxx::from_json( json );
-         trans_traces_doc.append( bsoncxx::builder::concatenate_doc{value.view()} );
-         trans_traces_doc.append( kvp( "non-utf8-purged", b_bool{true} ));
-      } catch( bsoncxx::exception& e ) {
-         elog( "Unable to convert transaction JSON to MongoDB JSON: ${e}", ("e", e.what()));
-         elog( "  JSON: ${j}", ("j", json));
+         auto v = to_variant_with_abi( *t );
+         string json = fc::json::to_string( v );
+         try {
+            const auto& value = bsoncxx::from_json( json );
+            trans_traces_doc.append( bsoncxx::builder::concatenate_doc{value.view()} );
+         } catch( bsoncxx::exception& ) {
+            try {
+               json = fc::prune_invalid_utf8( json );
+               const auto& value = bsoncxx::from_json( json );
+               trans_traces_doc.append( bsoncxx::builder::concatenate_doc{value.view()} );
+               trans_traces_doc.append( kvp( "non-utf8-purged", b_bool{true} ) );
+            } catch( bsoncxx::exception& e ) {
+               elog( "Unable to convert transaction JSON to MongoDB JSON: ${e}", ("e", e.what()) );
+               elog( "  JSON: ${j}", ("j", json) );
+            }
+         }
+         trans_traces_doc.append( kvp( "createdAt", b_date{now} ) );
+
+         try {
+            if( !_trans_traces.insert_one( trans_traces_doc.view() ) ) {
+               EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert trans ${id}", ("id", t->id) );
+            }
+         } catch( ... ) {
+            handle_mongo_exception( "trans_traces insert: " + json, __LINE__ );
+         }
+      } catch( ... ) {
+         handle_mongo_exception( "trans_traces serialization: " + t->id.str(), __LINE__ );
       }
    }
-   trans_traces_doc.append( kvp( "createdAt", b_date{now} ));
 
+   // insert action_traces
    try {
-      if( !trans_traces.insert_one( trans_traces_doc.view())) {
-         EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert trans ${id}", ("id", t->id));
+      if( !bulk_action_traces.execute() ) {
+         EOS_ASSERT( false, chain::mongo_db_insert_fail,
+                     "Bulk action traces insert failed for transaction trace: ${id}", ("id", t->id) );
       }
-   } catch(...) {
-      handle_mongo_exception("trans_traces insert: " + json, __LINE__);
+   } catch( ... ) {
+      handle_mongo_exception( "action traces insert", __LINE__ );
    }
+}
 
 void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr& bs ) {
@@ -835,12 +905,10 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr
          std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()});
 
    if( store_block_states ) {
-      auto block_states = mongo_conn[db_name][block_states_col];
       auto block_state_doc = bsoncxx::builder::basic::document{};
       block_state_doc.append( kvp( "block_num", b_int32{static_cast<int32_t>(block_num)} ),
                               kvp( "block_id", block_id_str ),
-                              kvp( "validated", b_bool{bs->validated} ),
-                              kvp( "in_current_chain", b_bool{bs->in_current_chain} ) );
+                              kvp( "validated", b_bool{bs->validated} ) );
 
       const chain::block_header_state& bhs = *bs;
 
@@ -862,8 +930,8 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr
       block_state_doc.append( kvp( "createdAt", b_date{now} ) );
 
       try {
-         if( !block_states.update_one( make_document( kvp( "block_id", block_id_str ) ),
-                                       make_document( kvp( "$set", block_state_doc.view() ) ), update_opts ) ) {
+         if( !_block_states.update_one( make_document( kvp( "block_id", block_id_str ) ),
+                                        make_document( kvp( "$set", block_state_doc.view() ) ), update_opts ) ) {
             EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block_state ${bid}", ("bid", block_id) );
          }
       } catch( ... ) {
@@ -872,7 +940,6 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr
    }
 
    if( store_blocks ) {
-      auto blocks = mongo_conn[db_name][blocks_col];
       auto block_doc = bsoncxx::builder::basic::document{};
       block_doc.append( kvp( "block_num", b_int32{static_cast<int32_t>(block_num)} ),
                         kvp( "block_id", block_id_str ) );
@@ -896,8 +963,8 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr
       block_doc.append( kvp( "createdAt", b_date{now} ) );
 
       try {
-         if( !blocks.update_one( make_document( kvp( "block_id", block_id_str ) ),
-                                 make_document( kvp( "$set", block_doc.view() ) ), update_opts ) ) {
+         if( !_blocks.update_one( make_document( kvp( "block_id", block_id_str ) ),
+                                  make_document( kvp( "$set", block_doc.view() ) ), update_opts ) ) {
            EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block ${bid}", ("bid", block_id) );
         }
      } catch( ... ) {
@@ -913,37 +980,49 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_
    using bsoncxx::builder::basic::make_document;
    using bsoncxx::builder::basic::kvp;
 
-   auto blocks = mongo_conn[db_name][blocks_col];
-   auto trans = mongo_conn[db_name][trans_col];
 
    const auto block_id = bs->block->id();
    const auto block_id_str = block_id.str();
-   const auto block_num = bs->block->block_num();
 
    auto now = std::chrono::duration_cast<std::chrono::milliseconds>(
          std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()});
 
    if( store_blocks ) {
-      auto ir_block = find_block( blocks, block_id_str );
+      auto ir_block = find_block( _blocks, block_id_str );
+      if( !ir_block ) {
+         _process_accepted_block( bs );
+         ir_block = find_block( _blocks, block_id_str );
+         if( !ir_block ) return; // should never happen
+      }
+
+      auto update_doc = make_document( kvp( "$set", make_document( kvp( "irreversible", b_bool{true} ),
+                                                                   kvp( "validated", b_bool{bs->validated} ),
+                                                                   kvp( "updatedAt", b_date{now} ) ) ) );
+
+      _blocks.update_one( make_document( kvp( "_id", ir_block->view()["_id"].get_oid() ) ), update_doc.view() );
+   }
+
+   if( store_block_states ) {
+      auto ir_block = find_block( _block_states, block_id_str );
       if( !ir_block ) {
          _process_accepted_block( bs );
-         ir_block = find_block( blocks, block_id_str );
+         ir_block = find_block( _block_states, block_id_str );
          if( !ir_block ) return; // should never happen
       }
 
       auto update_doc = make_document( kvp( "$set", make_document( kvp( "irreversible", b_bool{true} ),
                                                                    kvp( "validated", b_bool{bs->validated} ),
-                                                                  kvp( "in_current_chain", b_bool{bs->in_current_chain} ),
                                                                    kvp( "updatedAt", b_date{now} ) ) ) );
 
-      blocks.update_one( make_document( kvp( "_id", ir_block->view()["_id"].get_oid() ) ), update_doc.view() );
+      _block_states.update_one( make_document( kvp( "_id", ir_block->view()["_id"].get_oid() ) ), update_doc.view() );
    }
 
    if( store_transactions ) {
+      const auto block_num = bs->block->block_num();
       bool transactions_in_block = false;
       mongocxx::options::bulk_write bulk_opts;
       bulk_opts.ordered( false );
-      auto bulk = trans.create_bulk_write( bulk_opts );
+      auto bulk = _trans.create_bulk_write( bulk_opts );
 
       for( const auto& receipt : bs->block->transactions ) {
          string trx_id_str;
@@ -990,9 +1069,7 @@ void mongo_db_plugin_impl::add_pub_keys( const vector<chain::key_weight>& keys,
 
    if( keys.empty()) return;
 
-   auto pub_keys = mongo_conn[db_name][pub_keys_col];
-
-   mongocxx::bulk_write bulk = pub_keys.create_bulk_write();
+   mongocxx::bulk_write bulk = _pub_keys.create_bulk_write();
 
    for( const auto& pub_key_weight : keys ) {
       auto find_doc = bsoncxx::builder::basic::document();
@@ -1025,10 +1102,8 @@ void mongo_db_plugin_impl::remove_pub_keys( const account_name& name, const perm
    using bsoncxx::builder::basic::kvp;
    using bsoncxx::builder::basic::make_document;
 
-   auto pub_keys = mongo_conn[db_name][pub_keys_col];
-
    try {
-      auto result = pub_keys.delete_many( make_document( kvp( "account", name.to_string()),
+      auto result = _pub_keys.delete_many( make_document( kvp( "account", name.to_string()),
                                                          kvp( "permission", permission.to_string())));
       if( !result ) {
          EOS_ASSERT( false, chain::mongo_db_update_fail,
@@ -1050,9 +1125,7 @@ void mongo_db_plugin_impl::add_account_control( const vector<chain::permission_level_weight>& controlling_accounts,
          auto newacc = act.data_as<chain::newaccount>();
 
-         create_account( accounts, newacc.name, now );
+         create_account( _accounts, newacc.name, now );
 
          add_pub_keys( newacc.owner.keys, newacc.name, owner, now );
          add_account_control( newacc.owner.accounts, newacc.name, owner, now );
@@ -1169,10 +1240,10 @@ void mongo_db_plugin_impl::update_account(const chain::action& act)
          abi_cache_index.erase( setabi.account );
 
-         auto account = find_account( accounts, setabi.account );
+         auto account = find_account( _accounts, setabi.account );
          if( !account ) {
-            create_account( accounts, setabi.account, now );
-            account = find_account( accounts, setabi.account );
+            create_account( _accounts, setabi.account, now );
+            account = find_account( _accounts, setabi.account );
          }
          if( account ) {
            abi_def abi_def = fc::raw::unpack<abi_def>( setabi.abi );
@@ -1184,8 +1255,8 @@ void mongo_db_plugin_impl::update_account(const chain::action& act)
                                                kvp( "updatedAt", b_date{now} ))));
 
            try {
-               if( !accounts.update_one( make_document( kvp( "_id", account->view()["_id"].get_oid())),
-                                         update_from.view())) {
+               if( !_accounts.update_one( make_document( kvp( "_id", account->view()["_id"].get_oid())),
+                                          update_from.view())) {
                  EOS_ASSERT( false, chain::mongo_db_update_fail, "Failed to udpdate account ${n}", ("n", setabi.account));
               }
            } catch( ... ) {
@@ -1203,8 +1274,6 @@ void mongo_db_plugin_impl::update_account(const chain::action& act)
 }
 
 mongo_db_plugin_impl::mongo_db_plugin_impl()
-: mongo_inst{}
-, mongo_conn{}
 {
 }
 
@@ -1216,6 +1285,8 @@ mongo_db_plugin_impl::~mongo_db_plugin_impl() {
         condition.notify_one();
 
         consume_thread.join();
+
+        mongo_pool.reset();
      } catch( std::exception& e ) {
         elog( "Exception on mongo_db_plugin shutdown of consume thread: ${e}", ("e", e.what()));
      }
@@ -1225,12 +1296,15 @@ mongo_db_plugin_impl::~mongo_db_plugin_impl() {
 void mongo_db_plugin_impl::wipe_database() {
    ilog("mongo db wipe_database");
 
+   auto client = mongo_pool->acquire();
+   auto& mongo_conn = *client;
+
    auto block_states = mongo_conn[db_name][block_states_col];
    auto blocks = mongo_conn[db_name][blocks_col];
    auto trans = mongo_conn[db_name][trans_col];
    auto trans_traces = mongo_conn[db_name][trans_traces_col];
    auto action_traces = mongo_conn[db_name][action_traces_col];
-   accounts = mongo_conn[db_name][accounts_col];
+   auto accounts = mongo_conn[db_name][accounts_col];
    auto pub_keys = mongo_conn[db_name][pub_keys_col];
    auto account_controls = mongo_conn[db_name][account_controls_col];
 
@@ -1242,6 +1316,7 @@ void mongo_db_plugin_impl::wipe_database() {
    accounts.drop();
    pub_keys.drop();
    account_controls.drop();
+   ilog("done wipe_database");
 }
 
 void mongo_db_plugin_impl::init() {
@@ -1251,60 +1326,69 @@ void mongo_db_plugin_impl::init() {
    // Create the native contract accounts manually; sadly, we can't run their contracts to make them create themselves
    // See native_contract_chain_initializer::prepare_database()
 
-   accounts = mongo_conn[db_name][accounts_col];
-   if (accounts.count(make_document()) == 0) {
-      auto now = std::chrono::duration_cast<std::chrono::milliseconds>(
-            std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()});
-
-      auto doc = make_document( kvp( "name", name( chain::config::system_account_name ).to_string()),
-                                kvp( "createdAt", b_date{now} ));
-
-      try {
-         if( !accounts.insert_one( doc.view())) {
-            EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert account ${n}",
-                        ("n", name( chain::config::system_account_name ).to_string()));
-         }
-      } catch(...) {
-         handle_mongo_exception("account insert", __LINE__);
-      }
-
-      try {
-         // blocks indexes
-         auto blocks = mongo_conn[db_name][blocks_col];
-         blocks.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" ));
-         blocks.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" ));
-
-         auto block_stats = mongo_conn[db_name][block_states_col];
-         block_stats.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" ));
-         block_stats.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" ));
-
-         // accounts indexes
-         accounts.create_index( bsoncxx::from_json( R"xxx({ "name" : 1 })xxx" ));
-
-         // transactions indexes
-         auto trans = mongo_conn[db_name][trans_col];
-         trans.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1 })xxx" ));
-
-         auto trans_trace = mongo_conn[db_name][trans_traces_col];
-         trans_trace.create_index( bsoncxx::from_json( R"xxx({ "id" : 1 })xxx" ));
+   ilog("init mongo");
+   try {
+      auto client = mongo_pool->acquire();
+      auto& mongo_conn = *client;
 
-         // action traces indexes
-         auto action_traces = mongo_conn[db_name][action_traces_col];
-         action_traces.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1 })xxx" ));
+      auto accounts = mongo_conn[db_name][accounts_col];
+      if( accounts.count( make_document()) == 0 ) {
+         auto now = std::chrono::duration_cast<std::chrono::milliseconds>(
+               std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()} );
 
-         // pub_keys indexes
-         auto pub_keys = mongo_conn[db_name][pub_keys_col];
-         pub_keys.create_index( bsoncxx::from_json( R"xxx({ "account" : 1, "permission" : 1 })xxx" ));
-         pub_keys.create_index( bsoncxx::from_json( R"xxx({ "public_key" : 1 })xxx" ));
+         auto doc = make_document( kvp( "name", name( chain::config::system_account_name ).to_string()),
+                                   kvp( "createdAt", b_date{now} ));
 
-         // account_controls indexes
-         auto account_controls = mongo_conn[db_name][account_controls_col];
-         account_controls.create_index( bsoncxx::from_json( R"xxx({ "controlled_account" : 1, "controlled_permission" : 1 })xxx" ));
-         account_controls.create_index( bsoncxx::from_json( R"xxx({ "controlling_account" : 1 })xxx" ));
+         try {
+            if( !accounts.insert_one( doc.view())) {
+               EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert account ${n}",
+                           ("n", name( chain::config::system_account_name ).to_string()));
+            }
+         } catch (...) {
+            handle_mongo_exception( "account insert", __LINE__ );
+         }
 
-      } catch(...) {
-         handle_mongo_exception("create indexes", __LINE__);
+         try {
+            // blocks indexes
+            auto blocks = mongo_conn[db_name][blocks_col];
+            blocks.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" ));
+            blocks.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" ));
+
+            auto block_states = mongo_conn[db_name][block_states_col];
+            block_states.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" ));
+            block_states.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" ));
+
+            // accounts indexes
+            accounts.create_index( bsoncxx::from_json( R"xxx({ "name" : 1 })xxx" ));
+
+            // transactions indexes
+            auto trans = mongo_conn[db_name][trans_col];
+            trans.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1 })xxx" ));
+
+            auto trans_trace = mongo_conn[db_name][trans_traces_col];
+            trans_trace.create_index( bsoncxx::from_json( R"xxx({ "id" : 1 })xxx" ));
+
+            // action traces indexes
+            auto action_traces = mongo_conn[db_name][action_traces_col];
+            action_traces.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1 })xxx" ));
+
+            // pub_keys indexes
+            auto pub_keys = mongo_conn[db_name][pub_keys_col];
+            pub_keys.create_index( bsoncxx::from_json( R"xxx({ "account" : 1, "permission" : 1 })xxx" ));
+            pub_keys.create_index( bsoncxx::from_json( R"xxx({ "public_key" : 1 })xxx" ));
+
+            // account_controls indexes
+            auto account_controls = mongo_conn[db_name][account_controls_col];
+            account_controls.create_index(
+                  bsoncxx::from_json( R"xxx({ "controlled_account" : 1, "controlled_permission" : 1 })xxx" ));
+            account_controls.create_index( bsoncxx::from_json( R"xxx({ "controlling_account" : 1 })xxx" ));
+
+         } catch (...) {
+            handle_mongo_exception( "create indexes", __LINE__ );
+         }
       }
+   } catch (...) {
+      handle_mongo_exception( "mongo init", __LINE__ );
    }
 
    ilog("starting db plugin thread");
@@ -1354,9 +1438,9 @@ void mongo_db_plugin::set_program_options(options_description& cli, options_desc
          ("mongodb-store-action-traces", bpo::value<bool>()->default_value(true),
           "Enables storing action traces in mongodb.")
          ("mongodb-filter-on", bpo::value<vector<string>>()->composing(),
-          "Mongodb: Track actions which match receiver:action:actor. Actor may be blank to include all. Receiver and Action may not be blank. Default is * include everything.")
+          "Track actions which match receiver:action:actor. Receiver, Action, & Actor may be blank to include all. i.e. eosio:: or :transfer: Use * or leave unspecified to include all.")
         ("mongodb-filter-out", bpo::value<vector<string>>()->composing(),
-          "Mongodb: Do not track actions which match receiver:action:actor. Action and Actor both blank excludes all from reciever. Actor blank excludes all from reciever:action. Receiver may not be blank.")
+          "Do not track actions which match receiver:action:actor. Receiver, Action, & Actor may be blank to exclude all.")
         ;
 }
 
@@ -1419,8 +1503,6 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options)
             boost::split( v, s, boost::is_any_of( ":" ));
             EOS_ASSERT( v.size() == 3, fc::invalid_arg_exception, "Invalid value ${s} for --mongodb-filter-on", ("s", s));
             filter_entry fe{v[0], v[1], v[2]};
-            EOS_ASSERT( fe.receiver.value && fe.action.value, fc::invalid_arg_exception,
-                        "Invalid value ${s} for --mongodb-filter-on", ("s", s));
             my->filter_on.insert( fe );
          }
      } else {
@@ -1433,11 +1515,13 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options)
             boost::split( v, s, boost::is_any_of( ":" ));
             EOS_ASSERT( v.size() == 3, fc::invalid_arg_exception, "Invalid value ${s} for --mongodb-filter-out", ("s", s));
             filter_entry fe{v[0], v[1], v[2]};
-            EOS_ASSERT( fe.receiver.value, fc::invalid_arg_exception,
-                        "Invalid value ${s} for --mongodb-filter-out", ("s", s));
             my->filter_out.insert( fe );
          }
      }
+      if( options.count( "producer-name") ) {
+         wlog( "mongodb plugin not recommended on producer node" );
+         my->is_producer = true;
+      }
 
      if( my->start_block_num == 0 ) {
        my->start_block_reached = true;
@@ -1449,7 +1533,7 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options)
      my->db_name = uri.database();
      if( my->db_name.empty()) my->db_name = "EOS";
-      my->mongo_conn = mongocxx::client{uri};
+      my->mongo_pool.emplace(uri);
 
      // hook up to signals on controller
      chain_plugin* chain_plug = app().find_plugin<chain_plugin>();
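
[Editor's note] The change from a single shared `mongocxx::client` to a `mongo_pool` member follows the standard mongocxx thread-safety pattern: one `mongocxx::instance` per process, one `mongocxx::pool` per URI, and a short-lived client acquired per thread or unit of work. A minimal self-contained sketch of that pattern, assuming a local mongod; the database and collection names here are illustrative, not taken from the plugin:

    #include <mongocxx/instance.hpp>
    #include <mongocxx/pool.hpp>
    #include <mongocxx/uri.hpp>
    #include <bsoncxx/builder/basic/document.hpp>

    int main() {
       mongocxx::instance inst{};   // one per process; must outlive all clients
       mongocxx::pool pool{mongocxx::uri{"mongodb://127.0.0.1:27017"}};

       auto client = pool.acquire();               // returned to the pool on destruction
       auto accounts = (*client)["EOS"]["accounts"];

       using bsoncxx::builder::basic::kvp;
       using bsoncxx::builder::basic::make_document;
       accounts.insert_one(make_document(kvp("name", "eosio")).view());
    }

This is also why `wipe_database()` and `init()` above now acquire a client locally instead of holding long-lived collection handles on the plugin object.
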
diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index da5d2ff0fb6..b9fbd0489d0 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -17,6 +17,7 @@
 #include
 #include
+#include <boost/algorithm/string.hpp>
 #include
 #include
 #include
@@ -48,6 +49,9 @@ namespace fc {
 const fc::string logger_name("producer_plugin");
 fc::logger _log;
 
+const fc::string trx_trace_logger_name("transaction_tracing");
+fc::logger _trx_trace_log;
+
 namespace eosio {
 
 static appbase::abstract_plugin& _producer_plugin = app().register_plugin<producer_plugin>();
@@ -340,12 +344,32 @@ class producer_plugin_impl : public std::enable_shared_from_this<producer_plugin_impl> {
          auto block_time = chain.pending_block_state()->header.timestamp.to_time_point();
 
-         auto send_response = [this, &trx, &next](const fc::static_variant<fc::exception_ptr, transaction_trace_ptr>& response) {
+         auto send_response = [this, &trx, &chain, &next](const fc::static_variant<fc::exception_ptr, transaction_trace_ptr>& response) {
            next(response);
            if (response.contains<fc::exception_ptr>()) {
               _transaction_ack_channel.publish(std::pair<fc::exception_ptr, transaction_metadata_ptr>(response.get<fc::exception_ptr>(), trx));
+              if (_pending_block_mode == pending_block_mode::producing) {
+                 fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${txid} : ${why} ",
+                         ("block_num", chain.head_block_num() + 1)
+                         ("prod", chain.pending_block_state()->header.producer)
+                         ("txid", trx->id())
+                         ("why", response.get<fc::exception_ptr>()->what()));
+              } else {
+                 fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${txid} : ${why} ",
+                         ("txid", trx->id())
+                         ("why", response.get<fc::exception_ptr>()->what()));
+              }
            } else {
               _transaction_ack_channel.publish(std::pair<fc::exception_ptr, transaction_metadata_ptr>(nullptr, trx));
+              if (_pending_block_mode == pending_block_mode::producing) {
+                 fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${txid}",
+                         ("block_num", chain.head_block_num() + 1)
+                         ("prod", chain.pending_block_state()->header.producer)
+                         ("txid", trx->id()));
+              } else {
+                 fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${txid}",
+                         ("txid", trx->id()));
+              }
            }
         };
@@ -372,6 +396,15 @@ class producer_plugin_impl : public std::enable_shared_from_this<producer_plugin_impl> {
            if (trace->except) {
               if (failure_is_subjective(*trace->except, deadline_is_subjective)) {
                  _pending_incoming_transactions.emplace_back(trx, persist_until_expired, next);
+                 if (_pending_block_mode == pending_block_mode::producing) {
+                    fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING ",
+                            ("block_num", chain.head_block_num() + 1)
+                            ("prod", chain.pending_block_state()->header.producer)
+                            ("txid", trx->id()));
+                 } else {
+                    fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution COULD NOT FIT tx: ${txid} RETRYING",
+                            ("txid", trx->id()));
+                 }
               } else {
                  auto e_ptr = trace->except->dynamic_copy_exception();
                  send_response(e_ptr);
@@ -531,7 +564,13 @@ make_key_signature_provider(const private_key_type& key) {
 static producer_plugin_impl::signature_provider_type
 make_keosd_signature_provider(const std::shared_ptr<producer_plugin_impl>& impl, const string& url_str, const public_key_type pubkey) {
-   auto keosd_url = fc::url(url_str);
+   fc::url keosd_url;
+   if(boost::algorithm::starts_with(url_str, "unix://"))
+      //send the entire string after unix:// to http_plugin. It'll auto-detect which part
+      // is the unix socket path, and which part is the url to hit on the server
+      keosd_url = fc::url("unix", url_str.substr(7), ostring(), ostring(), ostring(), ostring(), ovariant_object(), fc::optional<uint16_t>());
+   else
+      keosd_url = fc::url(url_str);
    std::weak_ptr<producer_plugin_impl> weak_impl = impl;
 
    return [weak_impl, keosd_url, pubkey]( const chain::digest_type& digest ) {
@@ -641,8 +680,13 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_
 
 void producer_plugin::plugin_startup() {
    try {
-   if(fc::get_logger_map().find(logger_name) != fc::get_logger_map().end()) {
-      _log = fc::get_logger_map()[logger_name];
+   auto& logger_map = fc::get_logger_map();
+   if(logger_map.find(logger_name) != logger_map.end()) {
+      _log = logger_map[logger_name];
+   }
+
+   if( logger_map.find(trx_trace_logger_name) != logger_map.end()) {
+      _trx_trace_log = logger_map[trx_trace_logger_name];
    }
 
    ilog("producer plugin:  plugin_startup() begin");
@@ -976,8 +1020,29 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool
       // remove all persisted transactions that have now expired
       auto& persisted_by_id = _persistent_transactions.get<by_id>();
       auto& persisted_by_expiry = _persistent_transactions.get<by_expiry>();
-      while(!persisted_by_expiry.empty() && persisted_by_expiry.begin()->expiry <= pbs->header.timestamp.to_time_point()) {
-         persisted_by_expiry.erase(persisted_by_expiry.begin());
+      if (!persisted_by_expiry.empty()) {
+         int num_expired_persistent = 0;
+         int orig_count = _persistent_transactions.size();
+
+         while(!persisted_by_expiry.empty() && persisted_by_expiry.begin()->expiry <= pbs->header.timestamp.to_time_point()) {
+            auto const& txid = persisted_by_expiry.begin()->trx_id;
+            if (_pending_block_mode == pending_block_mode::producing) {
+               fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${txid}",
+                       ("block_num", chain.head_block_num() + 1)
+                       ("prod", chain.pending_block_state()->header.producer)
+                       ("txid", txid));
+            } else {
+               fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${txid}",
+                       ("txid", txid));
+            }
+
+            persisted_by_expiry.erase(persisted_by_expiry.begin());
+            num_expired_persistent++;
+         }
+
+         fc_dlog(_log, "Processed ${n} persisted transactions, Expired ${expired}",
+                 ("n", orig_count)
+                 ("expired", num_expired_persistent));
       }
 
       try {
@@ -1008,6 +1073,10 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool
            for (auto& trx: unapplied_trxs) {
               auto category = calculate_transaction_category(trx);
               if (category == tx_category::EXPIRED || (category == tx_category::UNEXPIRED_UNPERSISTED && _producers.empty())) {
+                 if (!_producers.empty()) {
+                    fc_dlog(_trx_trace_log, "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED : ${txid}",
+                            ("txid", trx->id));
+                 }
                  chain.drop_unapplied_transaction(trx);
               } else if (category == tx_category::PERSISTED || (category == tx_category::UNEXPIRED_UNPERSISTED && _pending_block_mode == pending_block_mode::producing)) {
                  apply_trxs.emplace_back(std::move(trx));
@@ -1015,33 +1084,50 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool
            }
         }
 
-         for (const auto& trx: apply_trxs) {
-            if (block_time <= fc::time_point::now()) exhausted = true;
-            if (exhausted) {
-               break;
-            }
+         if (!apply_trxs.empty()) {
+            int num_applied = 0;
+            int num_failed = 0;
+            int num_processed = 0;
 
-            try {
-               auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms);
-               bool deadline_is_subjective = false;
-               if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && block_time < deadline)) {
-                  deadline_is_subjective = true;
-                  deadline = block_time;
+            for (const auto& trx: apply_trxs) {
+               if (block_time <= fc::time_point::now()) exhausted = true;
+               if (exhausted) {
+                  break;
                }
 
-               auto trace = chain.push_transaction(trx, deadline);
-               if (trace->except) {
-                  if (failure_is_subjective(*trace->except, deadline_is_subjective)) {
-                     exhausted = true;
+               num_processed++;
+
+               try {
+                  auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms);
+                  bool deadline_is_subjective = false;
+                  if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && block_time < deadline)) {
+                     deadline_is_subjective = true;
+                     deadline = block_time;
+                  }
+
+                  auto trace = chain.push_transaction(trx, deadline);
+                  if (trace->except) {
+                     if (failure_is_subjective(*trace->except, deadline_is_subjective)) {
+                        exhausted = true;
+                     } else {
+                        // this failed our configured maximum transaction time, we don't want to replay it
+                        chain.drop_unapplied_transaction(trx);
+                        num_failed++;
+                     }
                   } else {
-                  // this failed our configured maximum transaction time, we don't want to replay it
-                  chain.drop_unapplied_transaction(trx);
+                     num_applied++;
                   }
-               }
-            } catch ( const guard_exception& e ) {
-               app().get_plugin<chain_plugin>().handle_guard_exception(e);
-               return start_block_result::failed;
-            } FC_LOG_AND_DROP();
+               } catch ( const guard_exception& e ) {
+                  app().get_plugin<chain_plugin>().handle_guard_exception(e);
+                  return start_block_result::failed;
+               } FC_LOG_AND_DROP();
+            }
+
+            fc_dlog(_log, "Processed ${m} of ${n} previously applied transactions, Applied ${applied}, Failed/Dropped ${failed}",
+                    ("m", num_processed)
+                    ("n", apply_trxs.size())
+                    ("applied", num_applied)
+                    ("failed", num_failed));
         }
      }
 
@@ -1049,61 +1135,88 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool
       auto& blacklist_by_id = _blacklisted_transactions.get<by_id>();
       auto& blacklist_by_expiry = _blacklisted_transactions.get<by_expiry>();
       auto now = fc::time_point::now();
-      while (!blacklist_by_expiry.empty() && blacklist_by_expiry.begin()->expiry <= now) {
-         blacklist_by_expiry.erase(blacklist_by_expiry.begin());
+      if(!blacklist_by_expiry.empty()) {
+         int num_expired = 0;
+         int orig_count = _blacklisted_transactions.size();
+
+         while (!blacklist_by_expiry.empty() && blacklist_by_expiry.begin()->expiry <= now) {
+            blacklist_by_expiry.erase(blacklist_by_expiry.begin());
+            num_expired++;
+         }
+
+         fc_dlog(_log, "Processed ${n} blacklisted transactions, Expired ${expired}",
+                 ("n", orig_count)
+                 ("expired", num_expired));
       }
 
       auto scheduled_trxs = chain.get_scheduled_transactions();
+      if (!scheduled_trxs.empty()) {
+         int num_applied = 0;
+         int num_failed = 0;
+         int num_processed = 0;
+
+         for (const auto& trx : scheduled_trxs) {
+            if (block_time <= fc::time_point::now()) exhausted = true;
+            if (exhausted) {
+               break;
+            }
 
-      for (const auto& trx : scheduled_trxs) {
-         if (block_time <= fc::time_point::now()) exhausted = true;
-         if (exhausted) {
-            break;
-         }
-
-         // configurable ratio of incoming txns vs deferred txns
-         while (_incoming_trx_weight >= 1.0 && orig_pending_txn_size && _pending_incoming_transactions.size()) {
-            auto e = _pending_incoming_transactions.front();
-            _pending_incoming_transactions.pop_front();
-            --orig_pending_txn_size;
-            _incoming_trx_weight -= 1.0;
-            on_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e));
-         }
+            num_processed++;
 
-         if (block_time <= fc::time_point::now()) {
-            exhausted = true;
-            break;
-         }
+            // configurable ratio of incoming txns vs deferred txns
+            while (_incoming_trx_weight >= 1.0 && orig_pending_txn_size && _pending_incoming_transactions.size()) {
+               auto e = _pending_incoming_transactions.front();
+               _pending_incoming_transactions.pop_front();
+               --orig_pending_txn_size;
+               _incoming_trx_weight -= 1.0;
+               on_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e));
+            }
 
-         if (blacklist_by_id.find(trx) != blacklist_by_id.end()) {
-            continue;
-         }
+            if (block_time <= fc::time_point::now()) {
+               exhausted = true;
+               break;
+            }
 
-         try {
-            auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms);
-            bool deadline_is_subjective = false;
-            if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && block_time < deadline)) {
-               deadline_is_subjective = true;
-               deadline = block_time;
+            if (blacklist_by_id.find(trx) != blacklist_by_id.end()) {
+               continue;
             }
 
-            auto trace = chain.push_scheduled_transaction(trx, deadline);
-            if (trace->except) {
-               if (failure_is_subjective(*trace->except, deadline_is_subjective)) {
-                  exhausted = true;
+            try {
+               auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms);
+               bool deadline_is_subjective = false;
+               if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && block_time < deadline)) {
+                  deadline_is_subjective = true;
+                  deadline = block_time;
+               }
+
+               auto trace = chain.push_scheduled_transaction(trx, deadline);
+               if (trace->except) {
+                  if (failure_is_subjective(*trace->except, deadline_is_subjective)) {
+                     exhausted = true;
+                  } else {
+                     auto expiration = fc::time_point::now() + fc::seconds(chain.get_global_properties().configuration.deferred_trx_expiration_window);
+                     // this failed our configured maximum transaction time, we don't want to replay it add it to a blacklist
+                     _blacklisted_transactions.insert(transaction_id_with_expiry{trx, expiration});
+                     num_failed++;
+                  }
                } else {
-                  auto expiration = fc::time_point::now() + fc::seconds(chain.get_global_properties().configuration.deferred_trx_expiration_window);
-                  // this failed our configured maximum transaction time, we don't want to replay it add it to a blacklist
-                  _blacklisted_transactions.insert(transaction_id_with_expiry{trx, expiration});
+                  num_applied++;
               }
-            }
-         } catch ( const guard_exception& e ) {
-            app().get_plugin<chain_plugin>().handle_guard_exception(e);
-            return start_block_result::failed;
-         } FC_LOG_AND_DROP();
+            } catch ( const guard_exception& e ) {
+               app().get_plugin<chain_plugin>().handle_guard_exception(e);
+               return start_block_result::failed;
+            } FC_LOG_AND_DROP();
+
+            _incoming_trx_weight += _incoming_defer_ratio;
+            if (!orig_pending_txn_size) _incoming_trx_weight = 0.0;
+         }
+
+         fc_dlog(_log, "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}",
+                 ("m", num_processed)
+                 ("n", scheduled_trxs.size())
+                 ("applied", num_applied)
+                 ("failed", num_failed));
 
-         _incoming_trx_weight += _incoming_defer_ratio;
-         if (!orig_pending_txn_size) _incoming_trx_weight = 0.0;
       }
    }
 
@@ -1112,12 +1225,16 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool
    } else {
       // attempt to apply any pending incoming transactions
       _incoming_trx_weight = 0.0;
-      while (orig_pending_txn_size && _pending_incoming_transactions.size()) {
-         auto e = _pending_incoming_transactions.front();
-         _pending_incoming_transactions.pop_front();
-         --orig_pending_txn_size;
-         on_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e));
-         if (block_time <= fc::time_point::now()) return start_block_result::exhausted;
+
+      if (!_pending_incoming_transactions.empty()) {
+         fc_dlog(_log, "Processing ${n} pending transactions", ("n", _pending_incoming_transactions.size()));
+         while (orig_pending_txn_size && _pending_incoming_transactions.size()) {
+            auto e = _pending_incoming_transactions.front();
+            _pending_incoming_transactions.pop_front();
+            --orig_pending_txn_size;
+            on_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e));
+            if (block_time <= fc::time_point::now()) return start_block_result::exhausted;
+         }
       }
       return start_block_result::succeeded;
    }
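
[Editor's note] The TRX_TRACE lines added throughout this file rely on fc's `${key}` substitution, where each placeholder is resolved by name against the trailing `("key", value)` pairs, so the placeholder must match its key exactly (`${block_num}`, not `${block num}`). A small sketch of the convention, assuming only the fc logging headers; the logger and values are illustrative:

    #include <fc/log/logger.hpp>
    #include <string>

    fc::logger _trx_trace_log;   // wired to the "transaction_tracing" logger at startup

    void log_accept(uint32_t block_num, const std::string& txid) {
       // ${block_num} and ${txid} are looked up by name in the argument pairs
       fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block_num} is ACCEPTING tx: ${txid}",
               ("block_num", block_num)("txid", txid));
    }

Because `fc_dlog` only evaluates its arguments when debug logging is enabled for that logger, these per-transaction traces cost little on nodes that leave `transaction_tracing` unconfigured.
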
diff --git a/plugins/test_control_api_plugin/CMakeLists.txt b/plugins/test_control_api_plugin/CMakeLists.txt
new file mode 100644
index 00000000000..0a36991e90b
--- /dev/null
+++ b/plugins/test_control_api_plugin/CMakeLists.txt
@@ -0,0 +1,7 @@
+file(GLOB HEADERS "include/eosio/test_control_api_plugin/*.hpp")
+add_library( test_control_api_plugin
+             test_control_api_plugin.cpp
+             ${HEADERS} )
+
+target_link_libraries( test_control_api_plugin test_control_plugin chain_plugin http_plugin appbase )
+target_include_directories( test_control_api_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" )
diff --git a/plugins/test_control_api_plugin/include/eosio/test_control_api_plugin/test_control_api_plugin.hpp b/plugins/test_control_api_plugin/include/eosio/test_control_api_plugin/test_control_api_plugin.hpp
new file mode 100644
index 00000000000..feac39a95ff
--- /dev/null
+++ b/plugins/test_control_api_plugin/include/eosio/test_control_api_plugin/test_control_api_plugin.hpp
@@ -0,0 +1,34 @@
+/**
+ *  @file
+ *  @copyright defined in eos/LICENSE.txt
+ */
+#pragma once
+#include <eosio/test_control_plugin/test_control_plugin.hpp>
+#include <eosio/chain_plugin/chain_plugin.hpp>
+
+#include <appbase/application.hpp>
+#include <eosio/http_plugin/http_plugin.hpp>
+
+namespace eosio {
+   using eosio::chain::controller;
+   using std::unique_ptr;
+   using namespace appbase;
+
+   class test_control_api_plugin : public plugin<test_control_api_plugin> {
+      public:
+         APPBASE_PLUGIN_REQUIRES((test_control_plugin)(chain_plugin)(http_plugin))
+
+         test_control_api_plugin();
+         virtual ~test_control_api_plugin();
+
+         virtual void set_program_options(options_description&, options_description&) override;
+
+         void plugin_initialize(const variables_map&);
+         void plugin_startup();
+         void plugin_shutdown();
+
+      private:
+         unique_ptr<class test_control_api_plugin_impl> my;
+   };
+
+}
diff --git a/plugins/test_control_api_plugin/test_control_api_plugin.cpp b/plugins/test_control_api_plugin/test_control_api_plugin.cpp
new file mode 100644
index 00000000000..91d5535c796
--- /dev/null
+++ b/plugins/test_control_api_plugin/test_control_api_plugin.cpp
@@ -0,0 +1,63 @@
+/**
+ *  @file
+ *  @copyright defined in eos/LICENSE.txt
+ */
+#include <eosio/test_control_api_plugin/test_control_api_plugin.hpp>
+#include <eosio/chain/exceptions.hpp>
+
+#include <fc/io/json.hpp>
+
+namespace eosio {
+
+static appbase::abstract_plugin& _test_control_api_plugin = app().register_plugin<test_control_api_plugin>();
+
+using namespace eosio;
+
+class test_control_api_plugin_impl {
+public:
+   test_control_api_plugin_impl(controller& db)
+      : db(db) {}
+
+   controller& db;
+};
+
+
+test_control_api_plugin::test_control_api_plugin(){}
+test_control_api_plugin::~test_control_api_plugin(){}
+
+void test_control_api_plugin::set_program_options(options_description&, options_description&) {}
+void test_control_api_plugin::plugin_initialize(const variables_map&) {}
+
+struct async_result_visitor : public fc::visitor<std::string> {
+   template<typename T>
+   std::string operator()(const T& v) const {
+      return fc::json::to_string(v);
+   }
+};
+
+#define CALL(api_name, api_handle, api_namespace, call_name, http_response_code) \
+{std::string("/v1/" #api_name "/" #call_name), \
+   [this, api_handle](string, string body, url_response_callback cb) mutable { \
+          try { \
+             if (body.empty()) body = "{}"; \
+             auto result = api_handle.call_name(fc::json::from_string(body).as<api_namespace::call_name ## _params>()); \
+             cb(http_response_code, fc::json::to_string(result)); \
+          } catch (...) { \
+             http_plugin::handle_exception(#api_name, #call_name, body, cb); \
+          } \
+       }}
+
+#define TEST_CONTROL_RW_CALL(call_name, http_response_code) CALL(test_control, rw_api, test_control_apis::read_write, call_name, http_response_code)
+
+void test_control_api_plugin::plugin_startup() {
+   my.reset(new test_control_api_plugin_impl(app().get_plugin<chain_plugin>().chain()));
+   auto rw_api = app().get_plugin<test_control_plugin>().get_read_write_api();
+
+   app().get_plugin<http_plugin>().add_api({
+      TEST_CONTROL_RW_CALL(kill_node_on_producer, 202)
+   });
+}
+
+void test_control_api_plugin::plugin_shutdown() {}
+
+}
diff --git a/plugins/test_control_plugin/CMakeLists.txt b/plugins/test_control_plugin/CMakeLists.txt
new file mode 100644
index 00000000000..aa6b1cff397
--- /dev/null
+++ b/plugins/test_control_plugin/CMakeLists.txt
@@ -0,0 +1,9 @@
+file(GLOB HEADERS "include/eosio/test_control_plugin/*.hpp")
+
+add_library( test_control_plugin
+             test_control_plugin.cpp
+             ${HEADERS} )
+
+target_link_libraries( test_control_plugin producer_plugin chain_plugin http_client_plugin appbase eosio_chain eos_utilities )
+target_include_directories( test_control_plugin
+                            PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" )
diff --git a/plugins/test_control_plugin/include/eosio/test_control_plugin/test_control_plugin.hpp b/plugins/test_control_plugin/include/eosio/test_control_plugin/test_control_plugin.hpp
new file mode 100644
index 00000000000..0a40d9b6e36
--- /dev/null
+++ b/plugins/test_control_plugin/include/eosio/test_control_plugin/test_control_plugin.hpp
@@ -0,0 +1,67 @@
+/**
+ *  @file
+ *  @copyright defined in eos/LICENSE.txt
+ */
+#pragma once
+#include <eosio/chain_plugin/chain_plugin.hpp>
+#include <eosio/producer_plugin/producer_plugin.hpp>
+#include <eosio/chain/controller.hpp>
+#include <appbase/application.hpp>
+
+namespace fc { class variant; }
+
+namespace eosio {
+   using namespace appbase;
+   typedef std::shared_ptr<class test_control_plugin_impl> test_control_ptr;
+
+namespace test_control_apis {
+struct empty{};
+
+class read_write {
+
+   public:
+      read_write(const test_control_ptr& test_control)
+         : my(test_control) {}
+
+      struct kill_node_on_producer_params {
+         name      producer;
+         uint32_t  where_in_sequence;
+         bool      based_on_lib;
+      };
+      using kill_node_on_producer_results = empty;
+      kill_node_on_producer_results kill_node_on_producer(const kill_node_on_producer_params& params) const;
+
+   private:
+      test_control_ptr my;
+};
+
+
+} // namespace test_control_apis
+
+
+class test_control_plugin : public plugin<test_control_plugin> {
+public:
+   APPBASE_PLUGIN_REQUIRES((chain_plugin))
+
+   test_control_plugin();
+   test_control_plugin(const test_control_plugin&) = delete;
+   test_control_plugin(test_control_plugin&&) = delete;
+   test_control_plugin& operator=(const test_control_plugin&) = delete;
+   test_control_plugin& operator=(test_control_plugin&&) = delete;
+   virtual ~test_control_plugin() override = default;
+
+   virtual void set_program_options(options_description& cli, options_description& cfg) override;
+   void plugin_initialize(const variables_map& options);
+   void plugin_startup();
+   void plugin_shutdown();
+
+   test_control_apis::read_write get_read_write_api() const { return test_control_apis::read_write(my); }
+
+private:
+   test_control_ptr my;
+};
+
+}
+
+FC_REFLECT(eosio::test_control_apis::empty, )
+FC_REFLECT(eosio::test_control_apis::read_write::kill_node_on_producer_params, (producer)(where_in_sequence)(based_on_lib) )
diff --git a/plugins/test_control_plugin/test_control_plugin.cpp b/plugins/test_control_plugin/test_control_plugin.cpp
new file mode 100644
index 00000000000..f2d630c6c59
--- /dev/null
+++ b/plugins/test_control_plugin/test_control_plugin.cpp
@@ -0,0 +1,142 @@
+/**
+ *  @file
+ *  @copyright defined in eos/LICENSE.txt
+ */
+#include <eosio/test_control_plugin/test_control_plugin.hpp>
+#include
+#include
+
+namespace fc { class variant; }
+
+namespace eosio {
+
+static appbase::abstract_plugin& _test_control_plugin = app().register_plugin<test_control_plugin>();
+
+class test_control_plugin_impl {
+public:
+   test_control_plugin_impl(chain::controller& c) : _chain(c) {}
+   void connect();
+   void disconnect();
+   void kill_on_lib(account_name prod, uint32_t where_in_seq);
+   void kill_on_head(account_name prod, uint32_t where_in_seq);
+
+private:
+   void accepted_block(const chain::block_state_ptr& bsp);
+   void applied_irreversible_block(const chain::block_state_ptr& bsp);
+   void retrieve_next_block_state(const chain::block_state_ptr& bsp);
+   void process_next_block_state(const chain::block_header_state& bhs);
+
+   fc::optional<boost::signals2::scoped_connection> _accepted_block_connection;
+   fc::optional<boost::signals2::scoped_connection> _irreversible_block_connection;
+   chain::controller&  _chain;
+   account_name        _producer;
+   int32_t             _where_in_sequence;
+   int32_t             _producer_sequence;
+   bool                _clean_producer_sequence;
+   std::atomic_bool    _track_lib;
+   std::atomic_bool    _track_head;
+};
+
+void test_control_plugin_impl::connect() {
+   _irreversible_block_connection.emplace(
+         _chain.irreversible_block.connect( [&]( const chain::block_state_ptr& bs ) {
+            applied_irreversible_block( bs );
+         } ));
+   _accepted_block_connection =
+         _chain.accepted_block.connect( [&]( const chain::block_state_ptr& bs ) {
+            accepted_block( bs );
+         } );
+}
+
+void test_control_plugin_impl::disconnect() {
+   _accepted_block_connection.reset();
+   _irreversible_block_connection.reset();
+}
+
+void test_control_plugin_impl::applied_irreversible_block(const chain::block_state_ptr& bsp) {
+   if (_track_lib)
+      retrieve_next_block_state(bsp);
+}
+
+void test_control_plugin_impl::accepted_block(const chain::block_state_ptr& bsp) {
+   if (_track_head)
+      retrieve_next_block_state(bsp);
+}
+
+void test_control_plugin_impl::retrieve_next_block_state(const chain::block_state_ptr& bsp) {
+   const auto hbn = bsp->block_num;
+   auto new_block_header = bsp->header;
+   new_block_header.timestamp = new_block_header.timestamp.next();
+   new_block_header.previous = bsp->id;
+   auto new_bs = bsp->generate_next(new_block_header.timestamp);
+   process_next_block_state(new_bs);
+}
+
+void test_control_plugin_impl::process_next_block_state(const chain::block_header_state& bhs) {
+   const auto block_time = _chain.head_block_time() + fc::microseconds(chain::config::block_interval_us);
+   const auto& producer_name = bhs.get_scheduled_producer(block_time).producer_name;
+   // start counting sequences for this producer (once we
+   if (producer_name == _producer && _clean_producer_sequence) {
+      _producer_sequence += 1;
+
+      if (_producer_sequence >= _where_in_sequence) {
+         app().quit();
+      }
+   } else if (producer_name != _producer) {
+      _producer_sequence = -1;
+      // can now guarantee we are at the start of the producer
+      _clean_producer_sequence = true;
+   }
+}
+
+void test_control_plugin_impl::kill_on_lib(account_name prod, uint32_t where_in_seq) {
+   _track_head = false;
+   _producer = prod;
+   _where_in_sequence = static_cast<int32_t>(where_in_seq);
+   _producer_sequence = -1;
+   _clean_producer_sequence = false;
+   _track_lib = true;
+}
+
+void test_control_plugin_impl::kill_on_head(account_name prod, uint32_t where_in_seq) {
+   _track_lib = false;
+   _producer = prod;
+   _where_in_sequence = static_cast<int32_t>(where_in_seq);
+   _producer_sequence = -1;
+   _clean_producer_sequence = false;
+   _track_head = true;
+}
+
+test_control_plugin::test_control_plugin()
+{
+}
+
+void test_control_plugin::set_program_options(options_description& cli, options_description& cfg) {
+}
+
+void test_control_plugin::plugin_initialize(const variables_map& options) {
+}
+
+void test_control_plugin::plugin_startup() {
+   my.reset(new test_control_plugin_impl(app().get_plugin<chain_plugin>().chain()));
+   my->connect();
+}
+
+void test_control_plugin::plugin_shutdown() {
+   my->disconnect();
+}
+
+namespace test_control_apis {
+read_write::kill_node_on_producer_results read_write::kill_node_on_producer(const read_write::kill_node_on_producer_params& params) const {
+
+   if (params.based_on_lib) {
+      my->kill_on_lib(params.producer, params.where_in_sequence);
+   } else {
+      my->kill_on_head(params.producer, params.where_in_sequence);
+   }
+   return read_write::kill_node_on_producer_results{};
+}
+
+} // namespace test_control_apis
+
+} // namespace eosio
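
[Editor's note] Given the `FC_REFLECT` of `kill_node_on_producer_params` above, the registered `/v1/test_control/kill_node_on_producer` endpoint expects a JSON body with exactly those three fields. A hedged sketch of building that body with fc; the producer name and sequence values are made up for illustration:

    #include <fc/io/json.hpp>
    #include <fc/variant_object.hpp>
    #include <iostream>

    int main() {
       // POST this body to /v1/test_control/kill_node_on_producer on the test node
       auto body = fc::mutable_variant_object()
          ("producer", "defproducera")        // illustrative producer name
          ("where_in_sequence", 12)           // which slot of that producer's round
          ("based_on_lib", false);            // track head blocks rather than LIB
       std::cout << fc::json::to_string( fc::variant(body) ) << std::endl;
    }

The plugin then quits the node (`app().quit()`) once the named producer has produced the requested number of blocks, tracked against head or last-irreversible depending on `based_on_lib`.
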
"unix"; + res.server = server_url.substr(strlen("unix://")); + return res; + } + //via rfc3986 and modified a bit to suck out the port number //Sadly this doesn't work for ipv6 addresses std::regex rgx(R"xx(^(([^:/?#]+):)?(//([^:/?#]*)(:(\d+))?)?([^?#]*)(\?([^#]*))?(#(.*))?)xx"); @@ -125,6 +132,9 @@ namespace eosio { namespace client { namespace http { } resolved_url resolve_url( const http_context& context, const parsed_url& url ) { + if(url.scheme == "unix") + return resolved_url(url); + tcp::resolver resolver(context->ios); boost::system::error_code ec; auto result = resolver.resolve(tcp::v4(), url.server, url.port, ec); @@ -207,7 +217,12 @@ namespace eosio { namespace client { namespace http { std::string re; try { - if(url.scheme == "http") { + if(url.scheme == "unix") { + boost::asio::local::stream_protocol::socket unix_socket(cp.context->ios); + unix_socket.connect(boost::asio::local::stream_protocol::endpoint(url.server)); + re = do_txrx(unix_socket, request, status_code); + } + else if(url.scheme == "http") { tcp::socket socket(cp.context->ios); do_connect(socket, url); re = do_txrx(socket, request, status_code); diff --git a/programs/cleos/httpc.hpp b/programs/cleos/httpc.hpp index 0fcd9b8490d..532f9b7623d 100644 --- a/programs/cleos/httpc.hpp +++ b/programs/cleos/httpc.hpp @@ -42,9 +42,12 @@ namespace eosio { namespace client { namespace http { { } + //used for unix domain, where resolving and ports are nonapplicable + resolved_url(const parsed_url& url) : parsed_url(url) {} + vector resolved_addresses; - uint16_t resolved_port; - bool is_loopback; + uint16_t resolved_port = 0; + bool is_loopback = false; }; resolved_url resolve_url( const http_context& context, @@ -86,6 +89,7 @@ namespace eosio { namespace client { namespace http { const string get_block_header_state_func = chain_func_base + "/get_block_header_state"; const string get_account_func = chain_func_base + "/get_account"; const string get_table_func = chain_func_base + "/get_table_rows"; + const string get_table_by_scope_func = chain_func_base + "/get_table_by_scope"; const string get_code_func = chain_func_base + "/get_code"; const string get_abi_func = chain_func_base + "/get_abi"; const string get_raw_code_and_abi_func = chain_func_base + "/get_raw_code_and_abi"; diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 4ce7c2dbbd3..b4acd6faf72 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -72,6 +72,7 @@ Usage: ./cleos create account [OPTIONS] creator name OwnerKey ActiveKey ``` */ +#include #include #include #include @@ -148,6 +149,22 @@ FC_DECLARE_EXCEPTION( localized_exception, 10000000, "an error occured" ); FC_MULTILINE_MACRO_END \ ) +//copy pasta from keosd's main.cpp +bfs::path determine_home_directory() +{ + bfs::path home; + struct passwd* pwd = getpwuid(getuid()); + if(pwd) { + home = pwd->pw_dir; + } + else { + home = getenv("HOME"); + } + if(home.empty()) + home = "./"; + return home; +} + string url = "http://127.0.0.1:8888/"; string wallet_url = "http://127.0.0.1:8900/"; bool no_verify = false; @@ -163,10 +180,13 @@ bool tx_skip_sign = false; bool tx_print_json = false; bool print_request = false; bool print_response = false; +bool no_auto_keosd = false; uint8_t tx_max_cpu_usage = 0; uint32_t tx_max_net_usage = 0; +uint32_t delaysec = 0; + vector tx_permission; eosio::client::http::http_context context; @@ -197,6 +217,8 @@ void add_standard_transaction_options(CLI::App* cmd, string default_permission = cmd->add_option("--max-cpu-usage-ms", tx_max_cpu_usage, 
diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp
index 4ce7c2dbbd3..b4acd6faf72 100644
--- a/programs/cleos/main.cpp
+++ b/programs/cleos/main.cpp
@@ -72,6 +72,7 @@ Usage: ./cleos create account [OPTIONS] creator name OwnerKey ActiveKey
 ```
 */
+#include <pwd.h>
 #include
 #include
 #include
@@ -148,6 +149,22 @@ FC_DECLARE_EXCEPTION( localized_exception, 10000000, "an error occured" );
       FC_MULTILINE_MACRO_END \
    )
 
+//copy pasta from keosd's main.cpp
+bfs::path determine_home_directory()
+{
+   bfs::path home;
+   struct passwd* pwd = getpwuid(getuid());
+   if(pwd) {
+      home = pwd->pw_dir;
+   }
+   else {
+      home = getenv("HOME");
+   }
+   if(home.empty())
+      home = "./";
+   return home;
+}
+
 string url = "http://127.0.0.1:8888/";
 string wallet_url = "http://127.0.0.1:8900/";
 bool no_verify = false;
@@ -163,10 +180,13 @@ bool tx_skip_sign = false;
 bool tx_print_json = false;
 bool print_request = false;
 bool print_response = false;
+bool no_auto_keosd = false;
 
 uint8_t  tx_max_cpu_usage = 0;
 uint32_t tx_max_net_usage = 0;
 
+uint32_t delaysec = 0;
+
 vector<string> tx_permission;
 
 eosio::client::http::http_context context;
@@ -197,6 +217,8 @@ void add_standard_transaction_options(CLI::App* cmd, string default_permission =
    cmd->add_option("--max-cpu-usage-ms", tx_max_cpu_usage, localized("set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit)"));
    cmd->add_option("--max-net-usage", tx_max_net_usage, localized("set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit)"));
+
+   cmd->add_option("--delay-sec", delaysec, localized("set the delay_sec seconds, defaults to 0s"));
 }
 
 vector<chain::permission_level> get_account_permissions(const vector<string>& permissions) {
@@ -297,6 +319,7 @@ fc::variant push_transaction( signed_transaction& trx, int32_t extra_kcpu = 1000
       trx.max_cpu_usage_ms = tx_max_cpu_usage;
       trx.max_net_usage_words = (tx_max_net_usage + 7)/8;
+      trx.delay_sec = delaysec;
    }
 
    if (!tx_skip_sign) {
@@ -537,6 +560,17 @@ fc::variant regproducer_variant(const account_name& producer, const public_key_t
          ;
 }
 
+chain::action create_open(const string& contract, const name& owner, symbol sym, const name& ram_payer) {
+   auto open_ = fc::mutable_variant_object
+      ("owner", owner)
+      ("symbol", sym)
+      ("ram_payer", ram_payer);
+   return action {
+      tx_permission.empty() ? vector<permission_level>{{ram_payer,config::active_name}} : get_account_permissions(tx_permission),
+      contract, "open", variant_to_bin( contract, N(open), open_ )
+   };
+}
+
 chain::action create_transfer(const string& contract, const name& sender, const name& recipient, asset amount, const string& memo ) {
 
    auto transfer = fc::mutable_variant_object
@@ -545,23 +579,18 @@ chain::action create_transfer(const string& contract, const name& sender, const
       ("quantity", amount)
       ("memo", memo);
 
-   auto args = fc::mutable_variant_object
-      ("code", contract)
-      ("action", "transfer")
-      ("args", transfer);
-
    return action {
      tx_permission.empty() ? vector<permission_level>{{sender,config::active_name}} : get_account_permissions(tx_permission),
      contract, "transfer", variant_to_bin( contract, N(transfer), transfer )
    };
 }
 
-chain::action create_setabi(const name& account, const abi_def& abi) {
+chain::action create_setabi(const name& account, const bytes& abi) {
    return action {
      tx_permission.empty() ? vector<permission_level>{{account,config::active_name}} : get_account_permissions(tx_permission),
      setabi{
         .account   = account,
-         .abi       = fc::raw::pack(abi)
+         .abi       = abi
      }
    };
 }
@@ -616,11 +645,11 @@ authority parse_json_authority_or_key(const std::string& authorityJsonOrFile) {
    }
 }
 
-asset to_asset( const string& code, const string& s ) {
-   static map<eosio::chain::symbol_code, eosio::chain::symbol> cache;
+asset to_asset( account_name code, const string& s ) {
+   static map< pair<account_name, eosio::chain::symbol_code>, eosio::chain::symbol> cache;
    auto a = asset::from_string( s );
    eosio::chain::symbol_code sym = a.get_symbol().to_symbol_code();
-   auto it = cache.find( sym );
+   auto it = cache.find( make_pair(code, sym) );
    auto sym_str = a.symbol_name();
    if ( it == cache.end() ) {
      auto json = call(get_currency_stats_func, fc::mutable_variant_object("json", false)
@@ -631,7 +660,7 @@ asset to_asset( const string& code, const string& s ) {
      auto obj_it = obj.find( sym_str );
      if (obj_it != obj.end()) {
        auto result = obj_it->value().as<eosio::chain_apis::read_only::get_currency_stats_result>();
-        auto p = cache.insert(make_pair( sym, result.max_supply.get_symbol() ));
+        auto p = cache.emplace( make_pair( code, sym ), result.max_supply.get_symbol() );
        it = p.first;
      } else {
        EOS_THROW(symbol_type_exception, "Symbol ${s} is not supported by token contract ${c}", ("s", sym_str)("c", code));
@@ -649,7 +678,7 @@ asset to_asset( const string& code, const string& s ) {
 }
 
 inline asset to_asset( const string& s ) {
-   return to_asset( "eosio.token", s );
+   return to_asset( N(eosio.token), s );
 }
 
 struct set_account_permission_subcommand {
@@ -766,6 +795,8 @@ void try_local_port( const string& lo_address, uint16_t port, uint32_t duration
 }
 
 void ensure_keosd_running(CLI::App* app) {
+    if (no_auto_keosd)
+        return;
     // get, version, net do not require keosd
     if (tx_skip_sign || app->got_subcommand("get") || app->got_subcommand("version") || app->got_subcommand("net"))
         return;
@@ -1336,7 +1367,7 @@ struct buyram_subcommand {
                      ("payer", from_str)
                      ("receiver", receiver_str)
                      ("bytes", fc::to_uint64(amount) * 1024ull);
-         send_actions({create_action({permission_level{from_str,config::active_name}}, config::system_account_name, N(buyrambytes), act_payload)});
+         send_actions({create_action({permission_level{from_str,config::active_name}}, config::system_account_name, N(buyrambytes), act_payload)});
       } else {
          fc::variant act_payload = fc::mutable_variant_object()
                ("payer", from_str)
@@ -1448,6 +1479,13 @@ void get_account( const string& accountName, bool json_format ) {
       asset staked;
       asset unstaking;
 
+      if( res.core_liquid_balance.valid() ) {
+         unstaking = asset( 0, res.core_liquid_balance->get_symbol() ); // Correct core symbol for unstaking asset.
+         staked = asset( 0, res.core_liquid_balance->get_symbol() );    // Correct core symbol for staked asset.
+      }
+
+      std::cout << "created: " << string(res.created) << std::endl;
+
       if(res.privileged) std::cout << "privileged: true" << std::endl;
 
       constexpr size_t indent_size = 5;
@@ -1470,14 +1508,14 @@ void get_account( const string& accountName, bool json_format ) {
       std::function<void(account_name, int)> dfs_print = [&]( account_name name, int depth ) -> void {
          auto& p = cache.at(name);
         std::cout << indent << std::string(depth*3, ' ') << name << ' ' << std::setw(5) << p.required_auth.threshold << ":    ";
+         const char *sep = "";
         for ( auto it = p.required_auth.keys.begin(); it != p.required_auth.keys.end(); ++it ) {
-            if ( it != p.required_auth.keys.begin() ) {
-               std::cout << ", ";
-            }
-            std::cout << it->weight << ' ' << string(it->key);
+            std::cout << sep << it->weight << ' ' << string(it->key);
+            sep = ", ";
        }
        for ( auto& acc : p.required_auth.accounts ) {
-            std::cout << acc.weight << ' ' << string(acc.permission.actor) << '@' << string(acc.permission.permission) << ", ";
+            std::cout << sep << acc.weight << ' ' << string(acc.permission.actor) << '@' << string(acc.permission.permission);
+            sep = ", ";
        }
        std::cout << std::endl;
        auto it = tree.find( name );
@@ -1720,6 +1758,7 @@ int main( int argc, char** argv ) {
    app.add_option( "-r,--header", header_opt_callback, localized("pass specific HTTP header; repeat this option to pass multiple headers"));
    app.add_flag( "-n,--no-verify", no_verify, localized("don't verify peer certificate when using HTTPS"));
+   app.add_flag( "--no-auto-keosd", no_auto_keosd, localized("don't automatically launch a keosd if one is not currently running"));
    app.set_callback([&app]{ ensure_keosd_running(&app);});
 
    bool verbose_errors = false;
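
[Editor's note] The `dfs_print` rewrite above swaps the per-iteration `it != begin()` comparison for the usual separator-variable idiom, which also threads the comma correctly from the keys loop into the accounts loop. A minimal illustration:

    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
       std::vector<std::string> keys  = {"k1", "k2"};
       std::vector<std::string> accts = {"acct@active"};

       // empty before the first item, ", " afterwards -- including between
       // the last key and the first account
       const char* sep = "";
       for (const auto& k : keys)  { std::cout << sep << k; sep = ", "; }
       for (const auto& a : accts) { std::cout << sep << a; sep = ", "; }
       std::cout << std::endl;   // prints: k1, k2, acct@active
    }
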
'secondary' or '2'.")); @@ -2011,6 +2050,23 @@ int main( int argc, char** argv ) { << std::endl; }); + auto getScope = get->add_subcommand( "scope", localized("Retrieve a list of scopes and tables owned by a contract"), false); + getScope->add_option( "contract", code, localized("The contract who owns the table") )->required(); + getScope->add_option( "-t,--table", table, localized("The name of the table as filter") ); + getScope->add_option( "-l,--limit", limit, localized("The maximum number of rows to return") ); + getScope->add_option( "-L,--lower", lower, localized("lower bound of scope") ); + getScope->add_option( "-U,--upper", upper, localized("upper bound of scope") ); + getScope->set_callback([&] { + auto result = call(get_table_by_scope_func, fc::mutable_variant_object("code",code) + ("table",table) + ("lower_bound",lower) + ("upper_bound",upper) + ("limit",limit) + ); + std::cout << fc::json::to_pretty_string(result) + << std::endl; + }); + // currency accessors // get currency balance string symbol; @@ -2077,12 +2133,7 @@ int main( int argc, char** argv ) { getTransaction->add_option("id", transaction_id_str, localized("ID of the transaction to retrieve"))->required(); getTransaction->add_option( "-b,--block-hint", block_num_hint, localized("the block number this transaction may be in") ); getTransaction->set_callback([&] { - transaction_id_type transaction_id; - try { - while( transaction_id_str.size() < 64 ) transaction_id_str += "0"; - transaction_id = transaction_id_type(transaction_id_str); - } EOS_RETHROW_EXCEPTIONS(transaction_id_type_exception, "Invalid transaction ID: ${transaction_id}", ("transaction_id", transaction_id_str)) - auto arg= fc::mutable_variant_object( "id", transaction_id); + auto arg= fc::mutable_variant_object( "id", transaction_id_str); if ( block_num_hint > 0 ) { arg = arg("block_num_hint", block_num_hint); } @@ -2185,7 +2236,7 @@ int main( int argc, char** argv ) { auto getSchedule = get_schedule_subcommand{get}; auto getTransactionId = get_transaction_id_subcommand{get}; - + /* auto getTransactions = get->add_subcommand("transactions", localized("Retrieve all transactions with specific account name referenced in their scope"), false); getTransactions->add_option("account_name", account_name, localized("name of account to query on"))->required(); @@ -2251,45 +2302,57 @@ int main( int argc, char** argv ) { string wasmPath; string abiPath; bool shouldSend = true; + bool contract_clear = false; auto codeSubcommand = setSubcommand->add_subcommand("code", localized("Create or update the code on an account")); codeSubcommand->add_option("account", account, localized("The account to set code for"))->required(); - codeSubcommand->add_option("code-file", wasmPath, localized("The fullpath containing the contract WASM"))->required(); + codeSubcommand->add_option("code-file", wasmPath, localized("The fullpath containing the contract WASM"));//->required(); + codeSubcommand->add_flag( "-c,--clear", contract_clear, localized("Remove code on an account")); auto abiSubcommand = setSubcommand->add_subcommand("abi", localized("Create or update the abi on an account")); abiSubcommand->add_option("account", account, localized("The account to set the ABI for"))->required(); - abiSubcommand->add_option("abi-file", abiPath, localized("The fullpath containing the contract ABI"))->required(); + abiSubcommand->add_option("abi-file", abiPath, localized("The fullpath containing the contract ABI"));//->required(); + abiSubcommand->add_flag( "-c,--clear", contract_clear, 
localized("Remove abi on an account")); auto contractSubcommand = setSubcommand->add_subcommand("contract", localized("Create or update the contract on an account")); contractSubcommand->add_option("account", account, localized("The account to publish a contract for")) ->required(); - contractSubcommand->add_option("contract-dir", contractPath, localized("The path containing the .wasm and .abi")) - ->required(); + contractSubcommand->add_option("contract-dir", contractPath, localized("The path containing the .wasm and .abi")); + // ->required(); contractSubcommand->add_option("wasm-file", wasmPath, localized("The file containing the contract WASM relative to contract-dir")); // ->check(CLI::ExistingFile); auto abi = contractSubcommand->add_option("abi-file,-a,--abi", abiPath, localized("The ABI for the contract relative to contract-dir")); // ->check(CLI::ExistingFile); + contractSubcommand->add_flag( "-c,--clear", contract_clear, localized("Rmove contract on an account")); std::vector actions; auto set_code_callback = [&]() { - std::string wasm; - fc::path cpath(contractPath); + bytes code_bytes; + if(!contract_clear){ + std::string wasm; + fc::path cpath(contractPath); + + if( cpath.filename().generic_string() == "." ) cpath = cpath.parent_path(); + + if( wasmPath.empty() ) + wasmPath = (cpath / (cpath.filename().generic_string()+".wasm")).generic_string(); + else + wasmPath = (cpath / wasmPath).generic_string(); - if( cpath.filename().generic_string() == "." ) cpath = cpath.parent_path(); + std::cerr << localized(("Reading WASM from " + wasmPath + "...").c_str()) << std::endl; + fc::read_file_contents(wasmPath, wasm); + EOS_ASSERT( !wasm.empty(), wast_file_not_found, "no wasm file found ${f}", ("f", wasmPath) ); - if( wasmPath.empty() ) - wasmPath = (cpath / (cpath.filename().generic_string()+".wasm")).generic_string(); - else - wasmPath = (cpath / wasmPath).generic_string(); + const string binary_wasm_header("\x00\x61\x73\x6d\x01\x00\x00\x00", 8); + if(wasm.compare(0, 8, binary_wasm_header)) + std::cerr << localized("WARNING: ") << wasmPath << localized(" doesn't look like a binary WASM file. Is it something else, like WAST? Trying anyways...") << std::endl; + code_bytes = bytes(wasm.begin(), wasm.end()); - std::cerr << localized(("Reading WASM from " + wasmPath + "...").c_str()) << std::endl; - fc::read_file_contents(wasmPath, wasm); - EOS_ASSERT( !wasm.empty(), wast_file_not_found, "no wasm file found ${f}", ("f", wasmPath) ); + } else { + code_bytes = bytes(); + } - const string binary_wasm_header("\x00\x61\x73\x6d\x01\x00\x00\x00", 8); - if(wasm.compare(0, 8, binary_wasm_header)) - std::cerr << localized("WARNING: ") << wasmPath << localized(" doesn't look like a binary WASM file. Is it something else, like WAST? Trying anyways...") << std::endl; - actions.emplace_back( create_setcode(account, bytes(wasm.begin(), wasm.end()) ) ); + actions.emplace_back( create_setcode(account, code_bytes ) ); if ( shouldSend ) { std::cerr << localized("Setting Code...") << std::endl; send_actions(std::move(actions), 10000, packed_transaction::zlib); @@ -2297,19 +2360,27 @@ int main( int argc, char** argv ) { }; auto set_abi_callback = [&]() { - fc::path cpath(contractPath); - if( cpath.filename().generic_string() == "." ) cpath = cpath.parent_path(); + bytes abi_bytes; + if(!contract_clear){ + fc::path cpath(contractPath); + if( cpath.filename().generic_string() == "." 
) cpath = cpath.parent_path(); + + if( abiPath.empty() ) { + abiPath = (cpath / (cpath.filename().generic_string()+".abi")).generic_string(); + } else { + abiPath = (cpath / abiPath).generic_string(); + } + + EOS_ASSERT( fc::exists( abiPath ), abi_file_not_found, "no abi file found ${f}", ("f", abiPath) ); + + abi_bytes = fc::raw::pack(fc::json::from_file(abiPath).as()); - if( abiPath.empty() ) { - abiPath = (cpath / (cpath.filename().generic_string()+".abi")).generic_string(); } else { - abiPath = (cpath / abiPath).generic_string(); + abi_bytes = bytes(); } - EOS_ASSERT( fc::exists( abiPath ), abi_file_not_found, "no abi file found ${f}", ("f", abiPath) ); - try { - actions.emplace_back( create_setabi(account, fc::json::from_file(abiPath).as()) ); + actions.emplace_back( create_setabi(account, abi_bytes) ); } EOS_RETHROW_EXCEPTIONS(abi_type_exception, "Fail to parse ABI JSON") if ( shouldSend ) { std::cerr << localized("Setting ABI...") << std::endl; @@ -2321,6 +2392,7 @@ int main( int argc, char** argv ) { add_standard_transaction_options(codeSubcommand, "account@active"); add_standard_transaction_options(abiSubcommand, "account@active"); contractSubcommand->set_callback([&] { + if(!contract_clear) EOS_ASSERT( !contractPath.empty(), contract_exception, " contract-dir is null ", ("f", contractPath) ); shouldSend = false; set_code_callback(); set_abi_callback(); @@ -2348,12 +2420,14 @@ int main( int argc, char** argv ) { string recipient; string amount; string memo; + bool pay_ram = false; auto transfer = app.add_subcommand("transfer", localized("Transfer EOS from account to account"), false); transfer->add_option("sender", sender, localized("The account sending EOS"))->required(); transfer->add_option("recipient", recipient, localized("The account receiving EOS"))->required(); transfer->add_option("amount", amount, localized("The amount of EOS to send"))->required(); transfer->add_option("memo", memo, localized("The memo for the transfer")); transfer->add_option("--contract,-c", con, localized("The contract which controls the token")); + transfer->add_flag("--pay-ram-to-open", pay_ram, localized("Pay ram to open recipient's token balance row")); add_standard_transaction_options(transfer, "sender@active"); transfer->set_callback([&] { @@ -2363,7 +2437,14 @@ int main( int argc, char** argv ) { tx_force_unique = false; } - send_actions({create_transfer(con, sender, recipient, to_asset(con, amount), memo)}); + auto transfer_amount = to_asset(con, amount); + auto transfer = create_transfer(con, sender, recipient, transfer_amount, memo); + if (!pay_ram) { + send_actions( { transfer }); + } else { + auto open_ = create_open(con, recipient, transfer_amount.get_symbol(), sender); + send_actions( { open_, transfer } ); + } }); // Net subcommand diff --git a/programs/eosio-abigen/main.cpp b/programs/eosio-abigen/main.cpp index c68f668588c..f15cd138ee4 100644 --- a/programs/eosio-abigen/main.cpp +++ b/programs/eosio-abigen/main.cpp @@ -86,6 +86,7 @@ int main(int argc, const char **argv) { abi_def output; try { vector actions; int result = Tool.run(create_find_macro_factory(contract, actions, abi_context).get()); if(!result) { + output.version = "eosio::abi/1.0"; result = Tool.run(create_factory(abi_verbose, abi_opt_sfs, abi_context, output, contract, actions).get()); if(!result) { abi_serializer abis(output, fc::seconds(1)); // No risk to client side serialization taking a long time diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 756e7cef2af..73cba43f337 100644 
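The cleos hunks above add a `get scope` subcommand that is a thin wrapper over the chain API's get_table_by_scope endpoint (the call target named by get_table_by_scope_func). Below is a minimal Python sketch of the equivalent raw request; the host/port and the example code/table/limit values are illustrative assumptions, not part of the patch:

```python
#!/usr/bin/env python3
# Hedged sketch: the request `cleos get scope` builds, sent by hand.
# Assumes a local nodeos listening on 127.0.0.1:8888.
import json
import urllib.request

payload = {
    "code": "eosio.token",   # positional `contract` argument
    "table": "accounts",     # -t/--table filter (optional)
    "lower_bound": "",       # -L/--lower
    "upper_bound": "",       # -U/--upper
    "limit": 10,             # -l/--limit
}
req = urllib.request.Request(
    "http://127.0.0.1:8888/v1/chain/get_table_by_scope",
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.dumps(json.load(resp), indent=2))
```

The same hunks also let `set code`/`set abi`/`set contract` take a -c/--clear flag (sending empty code or ABI bytes instead of reading a file), and teach `transfer` to prepend an `open` action when --pay-ram-to-open is given, so the sender pays for the recipient's balance row.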
--- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -17,6 +17,7 @@ #pragma GCC diagnostic ignored "-Wunused-result" #include #pragma GCC diagnostic pop +#include #include #include #include @@ -162,7 +163,7 @@ class host_def { return base_http_port - 100; } - bool is_local( ) { + bool is_local( ) const { return local_id.contains( host_name ); } @@ -230,6 +231,9 @@ class eosd_def { return dot_label_str; } + string get_node_num() const { + return name.substr( name.length() - 2 ); + } private: string dot_label_str; }; @@ -345,8 +349,7 @@ enum allowed_connection : char { class producer_names { public: - producer_names(int total_producers); - string producer_name(unsigned int producer_number) const; + static string producer_name(unsigned int producer_number); private: static const int total_chars = 12; static const char slot_chars[]; @@ -358,11 +361,7 @@ const char producer_names::valid_char_range = sizeof(producer_names::slot_chars) // for 26 or fewer total producers create "defproducera" .. "defproducerz" // above 26 produce "defproducera" .. "defproducerz", "defproduceaa" .. "defproducerb", etc. -producer_names::producer_names(int total_producers) -{ -} - -string producer_names::producer_name(unsigned int producer_number) const { +string producer_names::producer_name(unsigned int producer_number) { // keeping legacy "defproducer[a-z]", but if greater than valid_char_range, will use "defpraaaaaaa" char prod_name[] = "defproducera"; if (producer_number > valid_char_range) { @@ -407,6 +406,7 @@ struct launcher_def { bfs::path data_dir_base; bool skip_transaction_signatures = false; string eosd_extra_args; + std::map specific_nodeos_args; testnet_def network; string gelf_endpoint; vector aliases; @@ -457,10 +457,12 @@ struct launcher_def { void make_custom (); void write_dot_file (); void format_ssh (const string &cmd, const string &host_name, string &ssh_cmd_line); + void do_command(const host_def& host, const string& name, vector> env_pairs, const string& cmd); bool do_ssh (const string &cmd, const string &host_name); void prep_remote_config_dir (eosd_def &node, host_def *host); void launch (eosd_def &node, string >s); void kill (launch_modes mode, string sig_opt); + static string get_node_num(uint16_t node_num); pair find_node(uint16_t node_num); vector> get_nodes(const string& node_number_list); void bounce (const string& node_numbers); @@ -482,7 +484,9 @@ launcher_def::set_options (bpo::options_description &cfg) { ("p2p-plugin", bpo::value()->default_value("net"),"select a p2p plugin to use (either net or bnet). Defaults to net.") ("genesis,g",bpo::value(&genesis)->default_value("./genesis.json"),"set the path to genesis.json") ("skip-signature", bpo::bool_switch(&skip_transaction_signatures)->default_value(false), "nodeos does not require transaction signatures.") - ("nodeos", bpo::value(&eosd_extra_args), "forward nodeos command line argument(s) to each instance of nodeos, enclose arg in quotes") + ("nodeos", bpo::value(&eosd_extra_args), "forward nodeos command line argument(s) to each instance of nodeos, enclose arg(s) in quotes") + ("specific-num", bpo::value>()->composing(), "forward nodeos command line argument(s) (using \"--specific-nodeos\" flag) to this specific instance of nodeos. 
This parameter can be entered multiple times and requires a paired \"--specific-nodeos\" flag") + ("specific-nodeos", bpo::value<vector<string>>()->composing(), "forward nodeos command line argument(s) to its paired specific instance of nodeos (using \"--specific-num\"), enclose arg(s) in quotes") ("delay,d",bpo::value<int>(&start_delay)->default_value(0),"seconds delay before starting each node after the first") ("boot",bpo::bool_switch(&boot)->default_value(false),"After deploying the nodes and generating a boot script, invoke it.") ("nogen",bpo::bool_switch(&nogen)->default_value(false),"launch nodes without writing new config files") @@ -525,6 +529,25 @@ launcher_def::initialize (const variables_map &vmap) { } } + if (vmap.count("specific-num")) { + const auto specific_nums = vmap["specific-num"].as<vector<uint>>(); + const auto specific_args = vmap["specific-nodeos"].as<vector<string>>(); + if (specific_nums.size() != specific_args.size()) { + cerr << "ERROR: every specific-num argument must be paired with a specific-nodeos argument" << endl; + exit (-1); + } + const auto total_nodes = vmap["nodes"].as<size_t>(); + for(uint i = 0; i < specific_nums.size(); ++i) + { + const auto& num = specific_nums[i]; + if (num >= total_nodes) { + cerr << "\"--specific-num\" provided value= " << num << " is higher than \"--nodes\" provided value=" << total_nodes << endl; + exit (-1); + } + specific_nodeos_args[num] = specific_args[i]; + } + } + using namespace std::chrono; system_clock::time_point now = system_clock::now(); std::time_t now_c = system_clock::to_time_t(now); @@ -806,11 +829,11 @@ launcher_def::bind_nodes () { cerr << "Unable to allocate producers due to insufficient prod_nodes = " << prod_nodes << "\n"; exit (10); } - producer_names names(producers); int non_bios = prod_nodes - 1; int per_node = producers / non_bios; int extra = producers % non_bios; unsigned int i = 0; + unsigned int producer_number = 0; for (auto &h : bindings) { for (auto &inst : h.instances) { bool is_bios = inst.name == "bios"; @@ -834,12 +857,11 @@ launcher_def::bind_nodes () { ++count; --extra; } - char ext = i; while (count--) { - const auto prodname = names.producer_name(ext); + const auto prodname = producer_names::producer_name(producer_number); node.producers.push_back(prodname); producer_set.schedule.push_back({prodname,pubkey}); - ext += non_bios; + ++producer_number; } } } @@ -1023,8 +1045,6 @@ launcher_def::write_config_file (tn_node_def &node) { } cfg << "blocks-dir = " << block_dir << "\n"; - cfg << "readonly = 0\n"; - cfg << "send-whole-blocks = true\n"; cfg << "http-server-address = " << host->host_name << ":" << instance.http_port << "\n"; cfg << "http-validate-host = false\n"; if (p2p == p2p_plugin::NET) { @@ -1498,6 +1518,12 @@ launcher_def::launch (eosd_def &instance, string &gts) { eosdcmd += eosd_extra_args + " "; } } + if (instance.name != "bios" && !specific_nodeos_args.empty()) { + const auto node_num = boost::lexical_cast<uint16_t>(instance.get_node_num()); + if (specific_nodeos_args.count(node_num)) { + eosdcmd += specific_nodeos_args[node_num] + " "; + } + } if( add_enable_stale_production ) { eosdcmd += "--enable-stale-production true "; @@ -1595,11 +1621,16 @@ launcher_def::kill (launch_modes mode, string sig_opt) { } } +string +launcher_def::get_node_num(uint16_t node_num) { + string node_num_str = node_num < 10 ? "0":""; + node_num_str += boost::lexical_cast<string>(node_num); + return node_num_str; +} + pair<host_def, eosd_def> launcher_def::find_node(uint16_t node_num) { - string dex = node_num < 10 ? 
"0":""; - dex += boost::lexical_cast(node_num); - string node_name = network.name + dex; + const string node_name = network.name + get_node_num(node_num); for (const auto& host: bindings) { for (const auto& node: host.instances) { if (node_name == node.name) { @@ -1643,22 +1674,40 @@ launcher_def::get_nodes(const string& node_number_list) { return node_list; } +void +launcher_def::do_command(const host_def& host, const string& name, + vector> env_pairs, const string& cmd) { + if (!host.is_local()) { + string rcmd = "cd " + host.eosio_home + "; "; + for (auto& env_pair : env_pairs) { + rcmd += "export " + env_pair.first + "=" + env_pair.second + "; "; + } + rcmd += cmd; + if (!do_ssh(rcmd, host.host_name)) { + cerr << "Remote command failed for " << name << endl; + exit (-1); + } + } + else { + bp::environment e; + for (auto& env_pair : env_pairs) { + e.emplace(env_pair.first, env_pair.second); + } + bp::child c(cmd, e); + c.wait(); + } +} + void launcher_def::bounce (const string& node_numbers) { auto node_list = get_nodes(node_numbers); for (auto node_pair: node_list) { const host_def& host = node_pair.first; const eosd_def& node = node_pair.second; - string node_num = node.name.substr( node.name.length() - 2 ); - string cmd = "cd " + host.eosio_home + "; " - + "export EOSIO_HOME=" + host.eosio_home + string("; ") - + "export EOSIO_NODE=" + node_num + "; " - + "./scripts/eosio-tn_bounce.sh " + eosd_extra_args; + const string node_num = node.get_node_num(); cout << "Bouncing " << node.name << endl; - if (!do_ssh(cmd, host.host_name)) { - cerr << "Unable to bounce " << node.name << endl; - exit (-1); - } + string cmd = "./scripts/eosio-tn_bounce.sh " + eosd_extra_args; + do_command(host, node.name, { { "EOSIO_HOME", host.eosio_home }, { "EOSIO_NODE", node_num } }, cmd); } } @@ -1668,17 +1717,12 @@ launcher_def::down (const string& node_numbers) { for (auto node_pair: node_list) { const host_def& host = node_pair.first; const eosd_def& node = node_pair.second; - string node_num = node.name.substr( node.name.length() - 2 ); - string cmd = "cd " + host.eosio_home + "; " - + "export EOSIO_HOME=" + host.eosio_home + "; " - + "export EOSIO_NODE=" + node_num + "; " - + "export EOSIO_TN_RESTART_CONFIG_DIR=" + node.config_dir_name + "; " - + "./scripts/eosio-tn_down.sh"; + const string node_num = node.get_node_num(); cout << "Taking down " << node.name << endl; - if (!do_ssh(cmd, host.host_name)) { - cerr << "Unable to down " << node.name << endl; - exit (-1); - } + string cmd = "./scripts/eosio-tn_down.sh "; + do_command(host, node.name, + { { "EOSIO_HOME", host.eosio_home }, { "EOSIO_NODE", node_num }, { "EOSIO_TN_RESTART_CONFIG_DIR", node.config_dir_name } }, + cmd); } } @@ -1689,13 +1733,8 @@ launcher_def::roll (const string& host_names) { for (string host_name: hosts) { cout << "Rolling " << host_name << endl; auto host = find_host_by_name_or_address(host_name); - string cmd = "cd " + host->eosio_home + "; " - + "export EOSIO_HOME=" + host->eosio_home + "; " - + "./scripts/eosio-tn_roll.sh"; - if (!do_ssh(cmd, host_name)) { - cerr << "Unable to roll " << host << endl; - exit (-1); - } + string cmd = "./scripts/eosio-tn_roll.sh "; + do_command(*host, host_name, { { "EOSIO_HOME", host->eosio_home } }, cmd); } } diff --git a/programs/keosd/CMakeLists.txt b/programs/keosd/CMakeLists.txt index ac434e92c71..a332f8e26b1 100644 --- a/programs/keosd/CMakeLists.txt +++ b/programs/keosd/CMakeLists.txt @@ -9,11 +9,14 @@ if( GPERFTOOLS_FOUND ) list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc ) endif() 
+configure_file(config.hpp.in config.hpp ESCAPE_QUOTES) + target_link_libraries( ${KEY_STORE_EXECUTABLE_NAME} PRIVATE appbase PRIVATE wallet_api_plugin wallet_plugin PRIVATE http_plugin PRIVATE eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) +target_include_directories(${KEY_STORE_EXECUTABLE_NAME} PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) mas_sign(${KEY_STORE_EXECUTABLE_NAME}) diff --git a/programs/keosd/config.hpp.in b/programs/keosd/config.hpp.in new file mode 100644 index 00000000000..a7d34f390dc --- /dev/null +++ b/programs/keosd/config.hpp.in @@ -0,0 +1,11 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + * + * \warning This file is machine generated. DO NOT EDIT. See config.hpp.in for changes. + */ +#pragma once + +namespace eosio { namespace keosd { namespace config { + const string key_store_executable_name = "${KEY_STORE_EXECUTABLE_NAME}"; +}}} diff --git a/programs/keosd/main.cpp b/programs/keosd/main.cpp index ce1588fccd6..58a42d96b30 100644 --- a/programs/keosd/main.cpp +++ b/programs/keosd/main.cpp @@ -14,6 +14,7 @@ #include #include +#include "config.hpp" using namespace appbase; using namespace eosio; @@ -39,6 +40,11 @@ int main(int argc, char** argv) bfs::path home = determine_home_directory(); app().set_default_data_dir(home / "eosio-wallet"); app().set_default_config_dir(home / "eosio-wallet"); + http_plugin::set_defaults({ + .address_config_prefix = "", + .default_unix_socket_path = keosd::config::key_store_executable_name + ".sock", + .default_http_port = 8900 + }); app().register_plugin(); if(!app().initialize(argc, argv)) return -1; diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index 5c190f69145..82ce6470789 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -51,7 +51,7 @@ target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE appbase PRIVATE -Wl,${whole_archive_flag} login_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} history_plugin -Wl,${no_whole_archive_flag} - PRIVATE -Wl,${whole_archive_flag} bnet_plugin -Wl,${no_whole_archive_flag} + PRIVATE -Wl,${whole_archive_flag} bnet_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} history_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} chain_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} net_plugin -Wl,${no_whole_archive_flag} @@ -60,6 +60,8 @@ target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} txn_test_gen_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} db_size_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} producer_api_plugin -Wl,${no_whole_archive_flag} + PRIVATE -Wl,${whole_archive_flag} test_control_plugin -Wl,${no_whole_archive_flag} + PRIVATE -Wl,${whole_archive_flag} test_control_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${build_id_flag} PRIVATE chain_plugin http_plugin producer_plugin http_client_plugin PRIVATE eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 02c55ae9ff5..f17aa231105 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -99,6 +99,11 @@ int main(int argc, char** argv) auto root = fc::app_path(); app().set_default_data_dir(root / "eosio/nodeos/data" ); app().set_default_config_dir(root / "eosio/nodeos/config" ); + http_plugin::set_defaults({ + .address_config_prefix = "", + .default_unix_socket_path = "", + .default_http_port = 8888 + 
}); if(!app().initialize(argc, argv)) return INITIALIZE_FAIL; initialize_logging(); diff --git a/scripts/eosio-tn_bounce.sh b/scripts/eosio-tn_bounce.sh index 7062836c92c..55ef1d78159 100755 --- a/scripts/eosio-tn_bounce.sh +++ b/scripts/eosio-tn_bounce.sh @@ -41,4 +41,4 @@ else fi bash $EOSIO_HOME/scripts/eosio-tn_down.sh -bash $EOSIO_HOME/scripts/eosio-tn_up.sh $* +bash $EOSIO_HOME/scripts/eosio-tn_up.sh "$*" diff --git a/scripts/eosio-tn_roll.sh b/scripts/eosio-tn_roll.sh index 7c8f665c880..1b131edb0fa 100755 --- a/scripts/eosio-tn_roll.sh +++ b/scripts/eosio-tn_roll.sh @@ -82,10 +82,10 @@ cp $SDIR/$RD/$prog $RD/$prog if [ $DD = "all" ]; then for EOSIO_RESTART_DATA_DIR in `ls -d var/lib/node_??`; do - bash $EOSIO_HOME/scripts/eosio-tn_up.sh $* + bash $EOSIO_HOME/scripts/eosio-tn_up.sh "$*" done else - bash $EOSIO_HOME/scripts/eosio-tn_up.sh $* + bash $EOSIO_HOME/scripts/eosio-tn_up.sh "$*" fi unset EOSIO_RESTART_DATA_DIR diff --git a/scripts/eosio-tn_up.sh b/scripts/eosio-tn_up.sh index 895322a5eee..058ab16ed90 100755 --- a/scripts/eosio-tn_up.sh +++ b/scripts/eosio-tn_up.sh @@ -9,6 +9,8 @@ connected="0" rundir=programs/nodeos prog=nodeos +# Quote any args that are "*", so they are not expanded +qargs=`echo "$*" | sed -e 's/ \* / "*" /' -e 's/ \*$/ "*"/'` if [ "$PWD" != "$EOSIO_HOME" ]; then echo $0 must only be run from $EOSIO_HOME @@ -33,8 +35,8 @@ rm $datadir/stderr.txt ln -s $log $datadir/stderr.txt relaunch() { - echo "$rundir/$prog $* --data-dir $datadir --config-dir etc/eosio/node_$EOSIO_NODE > $datadir/stdout.txt 2>> $datadir/$log " - nohup $rundir/$prog $* --data-dir $datadir --config-dir etc/eosio/node_$EOSIO_NODE > $datadir/stdout.txt 2>> $datadir/$log & + echo "$rundir/$prog $qargs $* --data-dir $datadir --config-dir etc/eosio/node_$EOSIO_NODE > $datadir/stdout.txt 2>> $datadir/$log " + nohup $rundir/$prog $qargs $* --data-dir $datadir --config-dir etc/eosio/node_$EOSIO_NODE > $datadir/stdout.txt 2>> $datadir/$log & pid=$! echo pid = $pid echo $pid > $datadir/$prog.pid @@ -56,7 +58,7 @@ relaunch() { if [ -z "$EOSIO_LEVEL" ]; then echo starting with no modifiers - relaunch $* + relaunch if [ "$connected" -eq 0 ]; then EOSIO_LEVEL=replay else @@ -66,7 +68,7 @@ fi if [ "$EOSIO_LEVEL" == replay ]; then echo starting with replay - relaunch $* --hard-replay-blockchain + relaunch --hard-replay-blockchain if [ "$connected" -eq 0 ]; then EOSIO_LEVEL=resync else @@ -75,5 +77,5 @@ if [ "$EOSIO_LEVEL" == replay ]; then fi if [ "$EOSIO_LEVEL" == resync ]; then echo starting with delete-all-blocks - relaunch $* --delete-all-blocks + relaunch --delete-all-blocks fi diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index dc54cbfe0bd..4fd12dd3ad0 100644 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -528,63 +528,6 @@ fi printf "\\tMongo C++ driver found at /usr/local/lib64/libmongocxx-static.a.\\n" fi - printf "\\n\\tChecking secp256k1-zkp installation.\\n" - # install secp256k1-zkp (Cryptonomex branch) - if [ ! -e "/usr/local/lib/libsecp256k1.a" ]; then - printf "\\tInstalling secp256k1-zkp (Cryptonomex branch).\\n" - if ! cd "${TEMP_DIR}" - then - printf "\\n\\tUnable to enter directory %s.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! git clone https://github.com/cryptonomex/secp256k1-zkp.git - then - printf "\\tUnable to clone repo secp256k1-zkp @ https://github.com/cryptonomex/secp256k1-zkp.git.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! 
cd "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\n\\tUnable to cd into directory %s/secp256k1-zkp.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! ./autogen.sh - then - printf "\\tError running autogen for secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! ./configure - then - printf "\\tError running configure for secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! make -j"${JOBS}" - then - printf "\\tError compiling secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make install - then - printf "\\tError installing secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\tError removing directory %s/secp256k1-zkp.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\tsecp256k1 successfully installed @ /usr/local/lib/libsecp256k1.a.\\n" - else - printf "\\tsecp256k1 found @ /usr/local/lib/libsecp256k1.a.\\n" - fi - printf "\\n\\tChecking LLVM with WASM support.\\n" if [ ! -d "${HOME}/opt/wasm/bin" ]; then printf "\\tInstalling LLVM & WASM.\\n" diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index de0193a4b42..085d80839fb 100644 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -618,63 +618,6 @@ mongodconf printf "\\tMongo C++ driver found at /usr/local/lib64/libmongocxx-static.a.\\n" fi - printf "\\n\\tChecking secp256k1-zkp installation.\\n" - # install secp256k1-zkp (Cryptonomex branch) - if [ ! -e "/usr/local/lib/libsecp256k1.a" ]; then - printf "\\tInstalling secp256k1-zkp (Cryptonomex branch).\\n" - if ! cd "${TEMP_DIR}" - then - printf "\\n\\tUnable to enter directory %s.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! git clone https://github.com/cryptonomex/secp256k1-zkp.git - then - printf "\\tUnable to clone repo secp256k1-zkp @ https://github.com/cryptonomex/secp256k1-zkp.git.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\n\\tUnable to cd into directory %s/secp256k1-zkp.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! ./autogen.sh - then - printf "\\tError running autogen for secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! ./configure - then - printf "\\tError running configure for secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! make -j"${JOBS}" - then - printf "\\tError compiling secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make install - then - printf "\\tError installing secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\tError removing directory %s/secp256k1-zkp.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\n\\tsecp256k1 successfully installed @ /usr/local/lib.\\n\\n" - else - printf "\\tsecp256k1 found @ /usr/local/lib.\\n" - fi - printf "\\n\\tChecking LLVM with WASM support installation.\\n" if [ ! 
-d "${HOME}/opt/wasm/bin" ]; then printf "\\n\\tInstalling LLVM with WASM\\n" diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index aac05df8f43..c6e28fb73f6 100644 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -396,62 +396,6 @@ printf "\\tMongo C++ driver found at /usr/local/lib/libmongocxx-static.a.\\n" fi - printf "\\n\\tChecking secp256k1-zkp installation.\\n" - # install secp256k1-zkp (Cryptonomex branch) - if [ ! -e "/usr/local/lib/libsecp256k1.a" ]; then - if ! cd "${TEMP_DIR}" - then - printf "\\tUnable to enter directory %s.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! git clone https://github.com/cryptonomex/secp256k1-zkp.git - then - printf "\\tUnable to clone repo secp256k1-zkp @ https://github.com/cryptonomex/secp256k1-zkp.git.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\tUnable to enter directory %s/secp256k1-zkp.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! ./autogen.sh - then - printf "\\tError running autogen.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! ./configure - then - printf "\\tConfiguring secp256k1-zkp has returned the above error.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! make -j"${CPU_CORE}" - then - printf "\\tError compiling secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make install - then - printf "\\tInstalling secp256k1-zkp has returned the above error.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\tUnable to remove directory %s/secp256k1-zkp56k1-zkp.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\n\\n\\tSuccessffully installed secp256k1 @ /usr/local/lib/.\\n\\n" - else - printf "\\tsecp256k1 found at /usr/local/lib/.\\n" - fi - printf "\\n\\tChecking LLVM with WASM support.\\n" if [ ! -d /usr/local/wasm/bin ]; then if ! cd "${TEMP_DIR}" diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 2bfbf0ec28d..35cad3d7d8e 100644 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -393,63 +393,6 @@ printf "\\tMongo C++ driver found at /usr/local/lib64/libmongocxx-static.a.\\n" fi - printf "\\n\\tChecking secp256k1-zkp installation.\\n" - # install secp256k1-zkp (Cryptonomex branch) - if [ ! -e "/usr/local/lib/libsecp256k1.a" ]; then - printf "\\tInstalling secp256k1-zkp (Cryptonomex branch).\\n" - if ! cd "${TEMP_DIR}" - then - printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! git clone https://github.com/cryptonomex/secp256k1-zkp.git - then - printf "\\tUnable to clone repo secp256k1-zkp @ https://github.com/cryptonomex/secp256k1-zkp.git.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}/secp256k1-zkp" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! ./autogen.sh - then - printf "\\tError running autogen for secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! ./configure - then - printf "\\tError running configure for secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! make -j"${JOBS}" - then - printf "\\tError compiling secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! 
sudo make install - then - printf "\\tError installing secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\tError removing directory %s.\\n" "${TEMP_DIR}/secp256k1-zkp" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\n\\tsecp256k1 successfully installed @ /usr/local/lib.\\n\\n" - else - printf "\\tsecp256k1 found @ /usr/local/lib.\\n" - fi - printf "\\n\\tChecking LLVM with WASM support installation.\\n" if [ ! -d "${HOME}/opt/wasm/bin" ]; then printf "\\tInstalling LLVM & WASM\\n" diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index c1451a555c0..4c9873a60a1 100644 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -421,63 +421,6 @@ mongodconf printf "\\tMongo C++ driver found at /usr/local/lib/libmongocxx-static.a.\\n" fi - printf "\\n\\tChecking secp256k1-zkp installation.\\n" - # install secp256k1-zkp (Cryptonomex branch) - if [ ! -e "/usr/local/lib/libsecp256k1.a" ]; then - printf "\\tInstalling secp256k1-zkp (Cryptonomex branch).\\n" - if ! cd "${TEMP_DIR}" - then - printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! git clone https://github.com/cryptonomex/secp256k1-zkp.git - then - printf "\\tUnable to clone repo secp256k1-zkp @ https://github.com/cryptonomex/secp256k1-zkp.git.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}/secp256k1-zkp" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! ./autogen.sh - then - printf "\\tError running autogen for secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! ./configure - then - printf "\\tError running configure for secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! make -j"${JOBS}" - then - printf "\\tError compiling secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make install - then - printf "\\tError installing secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\tError removing directory %s.\\n" "${TEMP_DIR}/secp256k1-zkp" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\n\\tsecp256k1 successfully installed @ /usr/local/lib.\\n\\n" - else - printf "\\tsecp256k1 found @ /usr/local/lib.\\n" - fi - printf "\\n\\tChecking for LLVM with WASM support.\\n" if [ ! 
-d "${HOME}/opt/wasm/bin" ]; then # Build LLVM and clang with WASM support: diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 50b12cf1cb0..417eb35f07c 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -21,7 +21,7 @@ target_link_libraries( plugin_test eosio_testing eosio_chain chainbase eos_utili target_include_directories( plugin_test PUBLIC ${CMAKE_SOURCE_DIR}/plugins/net_plugin/include ${CMAKE_SOURCE_DIR}/plugins/chain_plugin/include ) -add_dependencies(plugin_test asserter test_api test_api_mem test_api_db test_api_multi_index exchange proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop dice eosio.msig) +add_dependencies(plugin_test asserter test_api test_api_mem test_api_db test_api_multi_index proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop dice eosio.msig) # configure_file(${CMAKE_CURRENT_SOURCE_DIR}/core_symbol.py.in ${CMAKE_CURRENT_BINARY_DIR}/core_symbol.py) @@ -36,31 +36,45 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-test.py ${CM configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-remote-test.py ${CMAKE_CURRENT_BINARY_DIR}/distributed-transactions-remote-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/sample-cluster-map.json ${CMAKE_CURRENT_BINARY_DIR}/sample-cluster-map.json COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/restart-scenarios-test.py ${CMAKE_CURRENT_BINARY_DIR}/restart-scenarios-test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_forked_chain_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_forked_chain_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_remote_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_remote_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_under_min_avail_ram.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_under_min_avail_ram.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_voting_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_voting_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/consensus-validation-malicious-producers.py ${CMAKE_CURRENT_BINARY_DIR}/consensus-validation-malicious-producers.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/validate-dirty-db.py ${CMAKE_CURRENT_BINARY_DIR}/validate-dirty-db.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/launcher_test.py ${CMAKE_CURRENT_BINARY_DIR}/launcher_test.py COPYONLY) #To run plugin_test with all log from blockchain displayed, put --verbose after --, i.e. 
plugin_test -- --verbose add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_output) add_test(NAME nodeos_sanity_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_sanity_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME nodeos_run_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_run_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME bnet_nodeos_run_test COMMAND tests/nodeos_run_test.py -v --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST bnet_nodeos_run_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST p2p_dawn515_test PROPERTY LABELS nonparallelizable_tests) if(BUILD_MONGO_DB_PLUGIN) add_test(NAME nodeos_run_test-mongodb COMMAND tests/nodeos_run_test.py --mongodb -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) + set_property(TEST nodeos_run_test PROPERTY LABELS nonparallelizable_tests) endif() add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 1 -n 4 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST distributed-transactions-test PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-resync COMMAND tests/restart-scenarios-test.py -c resync -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST restart-scenarios-test-resync PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-hard_replay COMMAND tests/restart-scenarios-test.py -c hardReplay -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST restart-scenarios-test-hard_replay PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-none COMMAND tests/restart-scenarios-test.py -c none --kill-sig term -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST restart-scenarios-test-none PROPERTY LABELS nonparallelizable_tests) # TODO: add_test(NAME consensus-validation-malicious-producers COMMAND tests/consensus-validation-malicious-producers.py -w 80 --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME validate_dirty_db_test COMMAND tests/validate-dirty-db.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST validate_dirty_db_test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME launcher_test COMMAND tests/launcher_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST launcher_test PROPERTY LABELS nonparallelizable_tests) # Long running tests add_test(NAME nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -71,6 +85,9 @@ set_property(TEST bnet_nodeos_sanity_lr_test PROPERTY LABELS long_running_tests) #add_test(NAME distributed_transactions_lr_test COMMAND tests/distributed-transactions-test.py -d 2 -p 21 -n 21 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) #set_property(TEST distributed_transactions_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_forked_chain_lr_test 
COMMAND tests/nodeos_forked_chain_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_forked_chain_lr_test PROPERTY LABELS long_running_tests) + add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_voting_lr_test PROPERTY LABELS long_running_tests) diff --git a/tests/Cluster.py b/tests/Cluster.py index 3a33263358c..8a9a1cb4b8f 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -18,6 +18,7 @@ from core_symbol import CORE_SYMBOL from testUtils import Utils from testUtils import Account +from Node import BlockType from Node import Node from WalletMgr import WalletMgr @@ -30,6 +31,7 @@ class Cluster(object): __localHost="localhost" __BiosHost="localhost" __BiosPort=8788 + __LauncherCmdArr=[] # pylint: disable=too-many-arguments # walletd [True|False] Is keosd running. If not load the wallet plugin @@ -97,15 +99,25 @@ def setWalletMgr(self, walletMgr): # pylint: disable=too-many-branches # pylint: disable=too-many-statements def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontKill=False - , dontBootstrap=False, totalProducers=None, extraNodeosArgs=None): + , dontBootstrap=False, totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None): """Launch cluster. pnodes: producer nodes count totalNodes: producer + non-producer nodes count - prodCount: producers per prodcuer node count - topo: cluster topology (as defined by launcher) + prodCount: producers per producer node count + topo: cluster topology (as defined by launcher), plus a "bridge" shape that is specific to this launch method delay: delay between individual nodes launch (as defined by launcher) delay 0 exposes a bootstrap bug where producer handover may have a large gap confusing nodes and bringing system to a halt. + onlyBios: When true, only loads the bios contract (and not more full bootstrapping). + dontBootstrap: When true, don't do any bootstrapping at all. + extraNodeosArgs: string of arguments to pass through to each nodeos instance (via --nodeos flag on launcher) + useBiosBootFile: determines which of two bootstrap methods is used (when both dontBootstrap and onlyBios are false). + The default value of true uses the bios_boot.sh file generated by the launcher. + A value of false uses manual bootstrapping in this script, which does not do things like stake votes for producers. + specificExtraNodeosArgs: dictionary of arguments to pass to a specific node (via --specific-num and + --specific-nodeos flags on launcher), example: { "5" : "--plugin eosio::test_control_api_plugin" } """ + assert(isinstance(topo, str)) + if not self.localCluster: Utils.Print("WARNING: Cluster not local, not launching %s." 
% (Utils.EosServerName)) return True @@ -126,14 +138,14 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne tries = tries - 1 time.sleep(2) - cmd="%s -p %s -n %s -s %s -d %s -i %s -f --p2p-plugin %s %s" % ( - Utils.EosLauncherPath, pnodes, totalNodes, topo, delay, datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3], + cmd="%s -p %s -n %s -d %s -i %s -f --p2p-plugin %s %s" % ( + Utils.EosLauncherPath, pnodes, totalNodes, delay, datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3], p2pPlugin, producerFlag) cmdArr=cmd.split() if self.staging: cmdArr.append("--nogen") - nodeosArgs="--max-transaction-time 50000 --abi-serializer-max-time-ms 990000 --filter-on * --p2p-max-nodes-per-host %d" % (totalNodes) + nodeosArgs="--max-transaction-time 990000 --abi-serializer-max-time-ms 990000 --filter-on * --p2p-max-nodes-per-host %d" % (totalNodes) if not self.walletd: nodeosArgs += " --plugin eosio::wallet_api_plugin" if self.enableMongo: @@ -148,6 +160,139 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne cmdArr.append("--nodeos") cmdArr.append(nodeosArgs) + if specificExtraNodeosArgs is not None: + assert(isinstance(specificExtraNodeosArgs, dict)) + for nodeNum,arg in specificExtraNodeosArgs.items(): + assert(isinstance(nodeNum, (str,int))) + assert(isinstance(arg, str)) + cmdArr.append("--specific-num") + cmdArr.append(str(nodeNum)) + cmdArr.append("--specific-nodeos") + cmdArr.append(arg) + + # must be last cmdArr.append before subprocess.call, so that everything is on the command line + # before constructing the shape.json file for "bridge" + if topo=="bridge": + shapeFilePrefix="shape_bridge" + shapeFile=shapeFilePrefix+".json" + cmdArrForOutput=copy.deepcopy(cmdArr) + cmdArrForOutput.append("--output") + cmdArrForOutput.append(shapeFile) + s=" ".join(cmdArrForOutput) + if Utils.Debug: Utils.Print("cmd: %s" % (s)) + if 0 != subprocess.call(cmdArrForOutput): + Utils.Print("ERROR: Launcher failed to create shape file \"%s\"." 
% (shapeFile)) + return False + + f = open(shapeFile, "r") + shapeFileJsonStr = f.read() + f.close() + shapeFileObject = json.loads(shapeFileJsonStr) + Utils.Print("shapeFileObject=%s" % (shapeFileObject)) + # retrieve the nodes, which as a map of node name to node definition, which the fc library prints out as + # an array of array, the first level of arrays is the pair entries of the map, the second is an array + # of two entries - [ , ] with first being the name and second being the node definition + shapeFileNodes = shapeFileObject["nodes"] + + numProducers=totalProducers if totalProducers is not None else totalNodes + maxProducers=ord('z')-ord('a')+1 + assert numProducers 0) - targetHeadBlockNum=self.nodes[0].getHeadBlockNum() #get root nodes head block num - if Utils.Debug: Utils.Print("Head block number on root node: %d" % (targetHeadBlockNum)) - if targetHeadBlockNum == -1: + node=self.nodes[0] + targetBlockNum=node.getBlockNum(blockType) #retrieve node 0's head or irrevercible block number + targetBlockNum+=blockAdvancing + if Utils.Debug: + Utils.Print("%s block number on root node: %d" % (blockType.type, targetBlockNum)) + if targetBlockNum == -1: return False - return self.waitOnClusterBlockNumSync(targetHeadBlockNum, timeout) + return self.waitOnClusterBlockNumSync(targetBlockNum, timeout) - def waitOnClusterBlockNumSync(self, targetBlockNum, timeout=None): + def waitOnClusterBlockNumSync(self, targetBlockNum, timeout=None, blockType=BlockType.head): """Wait for all nodes to have targetBlockNum finalized.""" assert(self.nodes) - def doNodesHaveBlockNum(nodes, targetBlockNum): + def doNodesHaveBlockNum(nodes, targetBlockNum, blockType): for node in nodes: try: - if (not node.killed) and (not node.isBlockPresent(targetBlockNum)): + if (not node.killed) and (not node.isBlockPresent(targetBlockNum, blockType=blockType)): return False except (TypeError) as _: # This can happen if client connects before server is listening @@ -325,7 +482,7 @@ def doNodesHaveBlockNum(nodes, targetBlockNum): return True - lam = lambda: doNodesHaveBlockNum(self.nodes, targetBlockNum) + lam = lambda: doNodesHaveBlockNum(self.nodes, targetBlockNum, blockType) ret=Utils.waitForBool(lam, timeout) return ret @@ -437,10 +594,10 @@ def populateWallet(self, accountsCount, wallet): def getNode(self, nodeId=0, exitOnError=True): if exitOnError and nodeId >= len(self.nodes): Utils.cmdError("cluster never created node %d" % (nodeId)) - errorExit("Failed to retrieve node %d" % (nodeId)) + Utils.errorExit("Failed to retrieve node %d" % (nodeId)) if exitOnError and self.nodes[nodeId] is None: Utils.cmdError("cluster has None value for node %d" % (nodeId)) - errorExit("Failed to retrieve node %d" % (nodeId)) + Utils.errorExit("Failed to retrieve node %d" % (nodeId)) return self.nodes[nodeId] def getNodes(self): @@ -576,7 +733,7 @@ def validateAccounts(self, accounts, testSysAccounts=True): node.validateAccounts(myAccounts) - def createAccountAndVerify(self, account, creator, stakedDeposit=1000, stakeNet=100, stakeCPU=100, buyRAM=100): + def createAccountAndVerify(self, account, creator, stakedDeposit=1000, stakeNet=100, stakeCPU=100, buyRAM=10000): """create account, verify account and return transaction id""" assert(len(self.nodes) > 0) node=self.nodes[0] @@ -597,7 +754,7 @@ def createAccountAndVerify(self, account, creator, stakedDeposit=1000, stakeNet= # return transId # return None - def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, stakeNet=100, stakeCPU=100, 
buyRAM=100, exitOnError=False): + def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, stakeNet=100, stakeCPU=100, buyRAM=10000, exitOnError=False): assert(len(self.nodes) > 0) node=self.nodes[0] trans=node.createInitializeAccount(account, creatorAccount, stakedDeposit, waitForTransBlock, stakeNet=stakeNet, stakeCPU=stakeCPU, buyRAM=buyRAM) @@ -623,6 +780,7 @@ def parseProducerKeys(configFile, nodeName): pattern=r"^\s*private-key\s*=\W+(\w+)\W+(\w+)\W+$" m=re.search(pattern, configStr, re.MULTILINE) + regMsg="None" if m is None else "NOT None" if m is None: if Utils.Debug: Utils.Print("Failed to find producer keys") return None @@ -684,13 +842,13 @@ def parseClusterKeys(totalNodes): keys=Cluster.parseProducerKeys(configFile, node) if keys is not None: producerKeys.update(keys) + keyMsg="None" if keys is None else len(keys) return producerKeys @staticmethod - def bootstrap(totalNodes, prodCount, biosHost, biosPort, dontKill=False, onlyBios=False): - """Create 'prodCount' init accounts and deposits 10000000000 SYS in each. If prodCount is -1 will initialize all possible producers. - Ensure nodes are inter-connected prior to this call. One way to validate this will be to check if every node has block 1.""" + def bios_bootstrap(totalNodes, biosHost, biosPort, dontKill=False): + """Bootstrap cluster using the bios_boot.sh script generated by eosio-launcher.""" Utils.Print("Starting cluster bootstrap.") biosNode=Node(biosHost, biosPort) @@ -698,6 +856,21 @@ def bootstrap(totalNodes, prodCount, biosHost, biosPort, dontKill=False, onlyBio Utils.Print("ERROR: Bios node doesn't appear to be running...") return None + cmd="bash bios_boot.sh" + if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) + if 0 != subprocess.call(cmd.split(), stdout=Utils.FNull): + if not silent: Utils.Print("Launcher failed to shut down eos cluster.") + return None + + p = re.compile('error', re.IGNORECASE) + bootlog="eosio-ignition-wd/bootlog.txt" + with open(bootlog) as bootFile: + for line in bootFile: + if p.search(line): + Utils.Print("ERROR: bios_boot.sh script resulted in errors. See %s" % (bootlog)) + Utils.Print(line) + return None + producerKeys=Cluster.parseClusterKeys(totalNodes) # should have totalNodes node plus bios node if producerKeys is None or len(producerKeys) < (totalNodes+1): @@ -713,6 +886,87 @@ def bootstrap(totalNodes, prodCount, biosHost, biosPort, dontKill=False, onlyBio return None biosNode.setWalletEndpointArgs(walletMgr.walletEndpointArgs) + try: + ignWallet=walletMgr.create("ignition") + if ignWallet is None: + Utils.Print("ERROR: Failed to create ignition wallet.") + return None + + eosioName="eosio" + eosioKeys=producerKeys[eosioName] + eosioAccount=Account(eosioName) + eosioAccount.ownerPrivateKey=eosioKeys["private"] + eosioAccount.ownerPublicKey=eosioKeys["public"] + eosioAccount.activePrivateKey=eosioKeys["private"] + eosioAccount.activePublicKey=eosioKeys["public"] + producerKeys.pop(eosioName) + + if not walletMgr.importKey(eosioAccount, ignWallet): + Utils.Print("ERROR: Failed to import %s account keys into ignition wallet." % (eosioName)) + return None + + initialFunds="1000000.0000 {0}".format(CORE_SYMBOL) + Utils.Print("Transfer initial fund %s to individual accounts." 
% (initialFunds)) + trans=None + contract="eosio.token" + action="transfer" + for name, keys in producerKeys.items(): + data="{\"from\":\"eosio\",\"to\":\"%s\",\"quantity\":\"%s\",\"memo\":\"%s\"}" % (name, initialFunds, "init transfer") + opts="--permission eosio@active" + if name != "eosio": + trans=biosNode.pushMessage(contract, action, data, opts) + if trans is None or not trans[0]: + Utils.Print("ERROR: Failed to transfer funds from eosio.token to %s." % (name)) + return None + + Node.validateTransaction(trans[1]) + + Utils.Print("Wait for last transfer transaction to become finalized.") + transId=Node.getTransId(trans[1]) + if not biosNode.waitForTransInBlock(transId): + Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) + return None + + Utils.Print("Cluster bootstrap done.") + finally: + if not dontKill: + walletMgr.killall() + walletMgr.cleanup() + + return biosNode + + @staticmethod + def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, dontKill=False, onlyBios=False): + """Create 'prodCount' init accounts and deposits 10000000000 SYS in each. If prodCount is -1 will initialize all possible producers. + Ensure nodes are inter-connected prior to this call. One way to validate this will be to check if every node has block 1.""" + + Utils.Print("Starting cluster bootstrap.") + if totalProducers is None: + totalProducers=totalNodes + + biosNode=Node(biosHost, biosPort) + if not biosNode.checkPulse(): + Utils.Print("ERROR: Bios node doesn't appear to be running...") + return None + + producerKeys=Cluster.parseClusterKeys(totalNodes) + # should have totalNodes node plus bios node + if producerKeys is None: + Utils.Print("ERROR: Failed to parse any producer keys from config files.") + return None + elif len(producerKeys) < (totalProducers+1): + Utils.Print("ERROR: Failed to parse %d producer keys from cluster config files, only found %d." % (totalProducers+1,len(producerKeys))) + return None + + walletMgr=WalletMgr(True) + walletMgr.killall() + walletMgr.cleanup() + + if not walletMgr.launch(): + Utils.Print("ERROR: Failed to launch bootstrap wallet.") + return None + biosNode.setWalletEndpointArgs(walletMgr.walletEndpointArgs) + try: ignWallet=walletMgr.create("ignition") @@ -948,11 +1202,8 @@ def bootstrap(totalNodes, prodCount, biosHost, biosPort, dontKill=False, onlyBio return biosNode - - # Populates list of EosInstanceInfo objects, matched to actual running instances - def discoverLocalNodes(self, totalNodes, timeout=0): - nodes=[] - + @staticmethod + def pgrepEosServers(timeout=None): pgrepOpts="-fl" # pylint: disable=deprecated-method if platform.linux_distribution()[0] in ["Ubuntu", "LinuxMint", "Fedora","CentOS Linux","arch"]: @@ -966,18 +1217,38 @@ def myFunc(): if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) psOut=Utils.checkOutput(cmd.split()) return psOut - except subprocess.CalledProcessError as _: - pass + except subprocess.CalledProcessError as ex: + msg=ex.output.decode("utf-8") + Utils.Print("ERROR: call of \"%s\" failed. 
%s" % (cmd, msg)) + return None return None - psOut=Utils.waitForObj(myFunc, timeout) + return Utils.waitForObj(myFunc, timeout) + + @staticmethod + def pgrepEosServerPattern(nodeInstance): + if isinstance(nodeInstance, str): + return r"[\n]?(\d+) (.* --data-dir var/lib/node_%s .*)\n" % nodeInstance + else: + nodeInstanceStr="%02d" % nodeInstance + return Cluster.pgrepEosServerPattern(nodeInstanceStr) + + # Populates list of EosInstanceInfo objects, matched to actual running instances + def discoverLocalNodes(self, totalNodes, timeout=None): + nodes=[] + + psOut=Cluster.pgrepEosServers(timeout) if psOut is None: Utils.Print("ERROR: No nodes discovered.") return nodes - if Utils.Debug: Utils.Print("pgrep output: \"%s\"" % psOut) + if len(psOut) < 6660: + psOutDisplay=psOut + else: + psOutDisplay=psOut[:6660]+"..." + if Utils.Debug: Utils.Print("pgrep output: \"%s\"" % psOutDisplay) for i in range(0, totalNodes): - pattern=r"[\n]?(\d+) (.* --data-dir var/lib/node_%02d .*)\n" % (i) + pattern=Cluster.pgrepEosServerPattern(i) m=re.search(pattern, psOut, re.MULTILINE) if m is None: Utils.Print("ERROR: Failed to find %s pid. Pattern %s" % (Utils.EosServerName, pattern)) @@ -987,8 +1258,19 @@ def myFunc(): if Utils.Debug: Utils.Print("Node>", instance) nodes.append(instance) + if Utils.Debug: Utils.Print("Found %d nodes" % (len(nodes))) return nodes + def discoverBiosNodePid(self, timeout=None): + psOut=Cluster.pgrepEosServers(timeout=timeout) + pattern=Cluster.pgrepEosServerPattern("bios") + Utils.Print("pattern={\n%s\n}, psOut=\n%s\n" % (pattern,psOut)) + m=re.search(pattern, psOut, re.MULTILINE) + if m is None: + Utils.Print("ERROR: Failed to find %s pid. Pattern %s" % (Utils.EosServerName, pattern)) + else: + self.biosNode.pid=int(m.group(1)) + # Kills a percentange of Eos instances starting from the tail and update eosInstanceInfos state def killSomeEosInstances(self, killCount, killSignalStr=Utils.SigKillTag): killSignal=signal.SIGKILL @@ -1064,6 +1346,32 @@ def killall(self, silent=True, allInstances=False): except OSError as _: pass + def bounce(self, nodes, silent=True): + """Bounces nodeos instances as indicated by parameter nodes. + nodes should take the form of a comma-separated list as accepted by the launcher --bounce command (e.g. '00' or '00,01')""" + cmdArr = Cluster.__LauncherCmdArr.copy() + cmdArr.append("--bounce") + cmdArr.append(nodes) + cmd=" ".join(cmdArr) + if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) + if 0 != subprocess.call(cmdArr): + if not silent: Utils.Print("Launcher failed to bounce nodes: %s." % (nodes)) + return False + return True + + def down(self, nodes, silent=True): + """Brings down nodeos instances as indicated by parameter nodes. + nodes should take the form of a comma-separated list as accepted by the launcher --bounce command (e.g. '00' or '00,01')""" + cmdArr = Cluster.__LauncherCmdArr.copy() + cmdArr.append("--down") + cmdArr.append(nodes) + cmd=" ".join(cmdArr) + if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) + if 0 != subprocess.call(cmdArr): + if not silent: Utils.Print("Launcher failed to take down nodes: %s." 
% (nodes)) + return False + return True + def isMongodDbRunning(self): cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs) subcommand="db.version()" @@ -1132,4 +1440,7 @@ def reportStatus(self): self.biosNode.reportStatus() if hasattr(self, "nodes"): for node in self.nodes: - node.reportStatus() + try: + node.reportStatus() + except: + Utils.Print("No reportStatus") diff --git a/tests/Node.py b/tests/Node.py index a66b5e03fa3..2795ba63c5e 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -10,17 +10,21 @@ from core_symbol import CORE_SYMBOL from testUtils import Utils from testUtils import Account +from testUtils import EnumType +from testUtils import addEnum +from testUtils import unhandledEnumType -class ReturnType: +class ReturnType(EnumType): + pass - def __init__(self, type): - self.type=type +addEnum(ReturnType, "raw") +addEnum(ReturnType, "json") - def __str__(self): - return self.type +class BlockType(EnumType): + pass -setattr(ReturnType, "raw", ReturnType("raw")) -setattr(ReturnType, "json", ReturnType("json")) +addEnum(BlockType, "head") +addEnum(BlockType, "lib") # pylint: disable=too-many-public-methods class Node(object): @@ -32,12 +36,15 @@ def __init__(self, host, port, pid=None, cmd=None, enableMongo=False, mongoHost= self.port=port self.pid=pid self.cmd=cmd + if Utils.Debug: Utils.Print("new Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) self.killed=False # marks node as killed self.enableMongo=enableMongo self.mongoHost=mongoHost self.mongoPort=mongoPort self.mongoDb=mongoDb - self.endpointArgs="--url http://%s:%d" % (self.host, self.port) + self.endpointHttp="http://%s:%d" % (self.host, self.port) + self.endpointArgs="--url %s" % (self.endpointHttp) + self.miscEosClientArgs="--no-auto-keosd" self.mongoEndpointArgs="" self.infoValid=None self.lastRetrievedHeadBlockNum=None @@ -45,6 +52,9 @@ def __init__(self, host, port, pid=None, cmd=None, enableMongo=False, mongoHost= if self.enableMongo: self.mongoEndpointArgs += "--host %s --port %d %s" % (mongoHost, mongoPort, mongoDb) + def eosClientArgs(self): + return self.endpointArgs + " " + self.miscEosClientArgs + def __str__(self): #return "Host: %s, Port:%d, Pid:%s, Cmd:\"%s\"" % (self.host, self.port, self.pid, self.cmd) return "Host: %s, Port:%d" % (self.host, self.port) @@ -167,7 +177,7 @@ def getBlock(self, blockNum, silentErrors=False, exitOnError=False): cmdDesc="get block" cmd="%s %d" % (cmdDesc, blockNum) msg="(block number=%s)" % (blockNum); - return self.processCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) + return self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) else: cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs) subcommand='db.blocks.findOne( { "block_num": %d } )' % (blockNum) @@ -205,61 +215,94 @@ def getBlockByIdMdb(self, blockId, silentErrors=False): return None - def isBlockPresent(self, blockNum): - """Does node have head_block_num >= blockNum""" + def isBlockPresent(self, blockNum, blockType=BlockType.head): + """Does node have head_block_num/last_irreversible_block_num >= blockNum""" assert isinstance(blockNum, int) + assert isinstance(blockType, BlockType) assert (blockNum > 0) info=self.getInfo(silentErrors=True, exitOnError=True) node_block_num=0 try: - node_block_num=int(info["head_block_num"]) + if blockType==BlockType.head: + node_block_num=int(info["head_block_num"]) + elif blockType==BlockType.lib: + node_block_num=int(info["last_irreversible_block_num"]) + 
else:
+                unhandledEnumType(blockType)
+
         except (TypeError, KeyError) as _:
-            Utils.Print("Failure in get info parsing. %s" % (info))
+            Utils.Print("Failure in get info parsing %s block. %s" % (blockType.type, info))
             raise
-        return True if blockNum <= node_block_num else False
+        present = True if blockNum <= node_block_num else False
+        if Utils.Debug and blockType==BlockType.lib:
+            decorator=""
+            if not present:
+                decorator="not "
+            Utils.Print("Block %d is %sfinalized." % (blockNum, decorator))
+
+        return present

     def isBlockFinalized(self, blockNum):
         """Is blockNum finalized"""
-        assert(blockNum)
-        assert isinstance(blockNum, int)
-        assert (blockNum > 0)
-
-        info=self.getInfo(silentErrors=True, exitOnError=True)
-        node_block_num=0
-        try:
-            node_block_num=int(info["last_irreversible_block_num"])
-        except (TypeError, KeyError) as _:
-            Utils.Print("Failure in get info parsing. %s" % (info))
-            raise
-
-        finalized = True if blockNum <= node_block_num else False
-        if Utils.Debug:
-            if finalized:
-                Utils.Print("Block %d is finalized." % (blockNum))
+        return self.isBlockPresent(blockNum, blockType=BlockType.lib)
+
+    class BlockWalker:
+        def __init__(self, node, trans, startBlockNum=None, endBlockNum=None):
+            self.trans=trans
+            self.node=node
+            self.startBlockNum=startBlockNum
+            self.endBlockNum=endBlockNum
+
+        def walkBlocks(self):
+            start=None
+            end=None
+            blockNum=self.trans["processed"]["action_traces"][0]["block_num"]
+            # it should be blockNum or later, but just in case the blocks leading up to it have any clues...
+            if self.startBlockNum is not None:
+                start=self.startBlockNum
             else:
-                Utils.Print("Block %d is not yet finalized." % (blockNum))
-
-        return finalized
+                start=blockNum-5
+            if self.endBlockNum is not None:
+                end=self.endBlockNum
+            else:
+                info=self.node.getInfo()
+                end=info["head_block_num"]
+            msg="Original transaction=\n%s\nExpected block_num=%s\n" % (json.dumps(self.trans, indent=2, sort_keys=True), blockNum)
+            for blockNum in range(start, end+1):
+                block=self.node.getBlock(blockNum)
+                msg+=json.dumps(block, indent=2, sort_keys=True)+"\n"
+            return msg

     # pylint: disable=too-many-branches
-    def getTransaction(self, transId, silentErrors=False, exitOnError=False, delayedRetry=True):
+    def getTransaction(self, transOrTransId, silentErrors=False, exitOnError=False, delayedRetry=True):
+        transId=None
+        trans=None
+        assert(isinstance(transOrTransId, (str,dict)))
+        if isinstance(transOrTransId, str):
+            transId=transOrTransId
+        else:
+            trans=transOrTransId
+            transId=Node.getTransId(trans)
         exitOnErrorForDelayed=not delayedRetry and exitOnError
         timeout=3
+        blockWalker=None
         if not self.enableMongo:
             cmdDesc="get transaction"
             cmd="%s %s" % (cmdDesc, transId)
             msg="(transaction id=%s)" % (transId);
             for i in range(0,(int(60/timeout) - 1)):
-                trans=self.processCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnErrorForDelayed, exitMsg=msg)
+                trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnErrorForDelayed, exitMsg=msg)
                 if trans is not None or not delayedRetry:
                     return trans
+                if blockWalker is None:
+                    blockWalker=Node.BlockWalker(self, trans)
                 if Utils.Debug: Utils.Print("Could not find transaction with id %s, delay and retry" % (transId))
                 time.sleep(timeout)
+            msg+="\nBlock printout -->>\n%s" % blockWalker.walkBlocks();
             # either it is there or the transaction has timed out
-            return self.processCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg)
+            return self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError,
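            # (Editor's note, an aside not in the diff: BlockWalker above is a
            # diagnostic helper; on a missed transaction, walkBlocks() returns the
            # original transaction plus a dump of every block from roughly
            # block_num-5 up to the current head, so the failure log shows where
            # the transaction should have landed.)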
exitMsg=msg) else: for i in range(0,(int(60/timeout) - 1)): trans=self.getTransactionMdb(transId, silentErrors=silentErrors, exitOnError=exitOnErrorForDelayed) @@ -285,7 +328,7 @@ def getTransactionMdb(self, transId, silentErrors=False, exitOnError=False): errorMsg="Exception during get db node get trans in mongodb with transaction id=%s. %s" % (transId,msg) if exitOnError: Utils.cmdError("" % (errorMsg)) - errorExit("Failed to retrieve transaction in mongodb for transaction id=%s" % (transId)) + Utils.errorExit("Failed to retrieve transaction in mongodb for transaction id=%s" % (transId)) elif not silentErrors: Utils.Print("ERROR: %s" % (errorMsg)) return None @@ -324,11 +367,16 @@ def isTransInBlock(self, transId, blockId): return False - def getBlockIdByTransId(self, transId, delayedRetry=True): - """Given a transaction Id (string), will return block id (int) containing the transaction""" - assert(transId) - assert(isinstance(transId, str)) - trans=self.getTransaction(transId, exitOnError=True, delayedRetry=delayedRetry) + def getBlockIdByTransId(self, transOrTransId, delayedRetry=True): + """Given a transaction (dictionary) or transaction Id (string), will return the actual block id (int) containing the transaction""" + assert(transOrTransId) + transId=None + assert(isinstance(transOrTransId, (str,dict))) + if isinstance(transOrTransId, str): + transId=transOrTransId + else: + transId=Node.getTransId(transOrTransId) + trans=self.getTransaction(transOrTransId, exitOnError=True, delayedRetry=delayedRetry) refBlockNum=None key="" @@ -407,17 +455,17 @@ def isTransFinalized(self, transId): return False assert(isinstance(blockId, int)) - return self.isBlockFinalized(blockId) + return self.isBlockPresent(blockId, blockType=BlockType.lib) # Create & initialize account and return creation transactions. 
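# (Editor's sketch, not part of the diff: getTransaction and getBlockIdByTransId
# now take either form, e.g.
#     trans = node.transferFunds(src, dst, "1.0000 SYS", "memo")   # hypothetical accounts
#     node.getTransaction(trans)                    # transaction dict, id extracted internally
#     node.getTransaction(Node.getTransId(trans))   # plain id string still works
# )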
Return transaction json object - def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, stakeNet=100, stakeCPU=100, buyRAM=100, exitOnError=False): + def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, stakeNet=100, stakeCPU=100, buyRAM=10000, exitOnError=False): cmdDesc="system newaccount" cmd='%s -j %s %s %s %s --stake-net "%s %s" --stake-cpu "%s %s" --buy-ram "%s %s"' % ( cmdDesc, creatorAccount.name, account.name, account.ownerPublicKey, account.activePublicKey, stakeNet, CORE_SYMBOL, stakeCPU, CORE_SYMBOL, buyRAM, CORE_SYMBOL) msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); - trans=self.processCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) transId=Node.getTransId(trans) if stakedDeposit > 0: @@ -434,7 +482,7 @@ def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTran cmd="%s -j %s %s %s %s" % ( cmdDesc, creatorAccount.name, account.name, account.ownerPublicKey, account.activePublicKey) msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); - trans=self.processCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) transId=Node.getTransId(trans) if stakedDeposit > 0: @@ -450,7 +498,7 @@ def getEosAccount(self, name, exitOnError=False): cmdDesc="get account" cmd="%s -j %s" % (cmdDesc, name) msg="( getEosAccount(name=%s) )" % (name); - return self.processCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) + return self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) else: return self.getEosAccountFromDb(name, exitOnError=exitOnError) @@ -459,13 +507,18 @@ def getEosAccountFromDb(self, name, exitOnError=False): subcommand='db.accounts.findOne({"name" : "%s"})' % (name) if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd)) try: - trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand, exitOnError=exitOnError) + timeout = 3 + for i in range(0,(int(60/timeout) - 1)): + trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand, exitOnError=exitOnError) + if trans is not None: + return trans + time.sleep(timeout) return trans except subprocess.CalledProcessError as ex: msg=ex.output.decode("utf-8") if exitOnError: Utils.cmdError("Exception during get account from db for %s. %s" % (name, msg)) - errorExit("Failed during get account from db for %s. %s" % (name, msg)) + Utils.errorExit("Failed during get account from db for %s. %s" % (name, msg)) Utils.Print("ERROR: Exception during get account from db for %s. 
%s" % (name, msg)) return None @@ -474,7 +527,7 @@ def getTable(self, contract, scope, table, exitOnError=False): cmdDesc = "get table" cmd="%s %s %s %s" % (cmdDesc, contract, scope, table) msg="contract=%s, scope=%s, table=%s" % (contract, scope, table); - return self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) def getTableAccountBalance(self, contract, scope): assert(isinstance(contract, str)) @@ -498,7 +551,7 @@ def getCurrencyBalance(self, contract, account, symbol=CORE_SYMBOL, exitOnError= cmdDesc = "get currency balance" cmd="%s %s %s %s" % (cmdDesc, contract, account, symbol) msg="contract=%s, account=%s, symbol=%s" % (contract, account, symbol); - return self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg, returnType=ReturnType.raw) + return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg, returnType=ReturnType.raw) def getCurrencyStats(self, contract, symbol=CORE_SYMBOL, exitOnError=False): """returns Json output from get currency stats.""" @@ -509,7 +562,7 @@ def getCurrencyStats(self, contract, symbol=CORE_SYMBOL, exitOnError=False): cmdDesc = "get currency stats" cmd="%s %s %s" % (cmdDesc, contract, symbol) msg="contract=%s, symbol=%s" % (contract, symbol); - return self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) # Verifies account. Returns "get account" json return object def verifyAccount(self, account): @@ -551,21 +604,19 @@ def waitForTransFinalization(self, transId, timeout=None): ret=Utils.waitForBool(lam, timeout) return ret - def waitForNextBlock(self, timeout=None): - num=self.getHeadBlockNum() + def waitForNextBlock(self, timeout=None, blockType=BlockType.head): + num=self.getBlockNum(blockType=blockType) lam = lambda: self.getHeadBlockNum() > num ret=Utils.waitForBool(lam, timeout) return ret - def waitForBlock(self, blockNum, timeout=None): - lam = lambda: self.getHeadBlockNum() > blockNum + def waitForBlock(self, blockNum, timeout=None, blockType=BlockType.head): + lam = lambda: self.getBlockNum(blockType=blockType) > blockNum ret=Utils.waitForBool(lam, timeout) return ret - def waitForIrreversibleBlock(self, blockNum, timeout=None): - lam = lambda: self.getIrreversibleBlockNum() >= blockNum - ret=Utils.waitForBool(lam, timeout) - return ret + def waitForIrreversibleBlock(self, blockNum, timeout=None, blockType=BlockType.head): + return self.waitForBlock(blockNum, timeout=timeout, blockType=blockType) # Trasfer funds. Returns "transfer" json return object def transferFunds(self, source, destination, amountStr, memo="memo", force=False, waitForTransBlock=False, exitOnError=True): @@ -576,7 +627,7 @@ def transferFunds(self, source, destination, amountStr, memo="memo", force=False assert(isinstance(destination, Account)) cmd="%s %s -v transfer -j %s %s" % ( - Utils.EosClientPath, self.endpointArgs, source.name, destination.name) + Utils.EosClientPath, self.eosClientArgs(), source.name, destination.name) cmdArr=cmd.split() cmdArr.append(amountStr) cmdArr.append(memo) @@ -592,12 +643,12 @@ def transferFunds(self, source, destination, amountStr, memo="memo", force=False Utils.Print("ERROR: Exception during funds transfer. 
%s" % (msg)) if exitOnError: Utils.cmdError("could not transfer \"%s\" from %s to %s" % (amountStr, source, destination)) - errorExit("Failed to transfer \"%s\" from %s to %s" % (amountStr, source, destination)) + Utils.errorExit("Failed to transfer \"%s\" from %s to %s" % (amountStr, source, destination)) return None if trans is None: Utils.cmdError("could not transfer \"%s\" from %s to %s" % (amountStr, source, destination)) - errorExit("Failed to transfer \"%s\" from %s to %s" % (amountStr, source, destination)) + Utils.errorExit("Failed to transfer \"%s\" from %s to %s" % (amountStr, source, destination)) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -669,7 +720,7 @@ def getAccountsByKey(self, key, exitOnError=False): cmdDesc = "get accounts" cmd="%s %s" % (cmdDesc, key) msg="key=%s" % (key); - return self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) # Get actions mapped to an account (cleos get actions) def getActions(self, account, pos=-1, offset=-1, exitOnError=False): @@ -681,7 +732,7 @@ def getActions(self, account, pos=-1, offset=-1, exitOnError=False): cmdDesc = "get actions" cmd="%s -j %s %d %d" % (cmdDesc, account.name, pos, offset) msg="account=%s, pos=%d, offset=%d" % (account.name, pos, offset); - return self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) else: return self.getActionsMdb(account, pos, offset, exitOnError=exitOnError) @@ -719,7 +770,7 @@ def getServants(self, name, exitOnError=False): cmdDesc = "get servants" cmd="%s %s" % (cmdDesc, name) msg="name=%s" % (name); - return self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) def getServantsArr(self, name): trans=self.getServants(name, exitOnError=True) @@ -741,7 +792,7 @@ def getAccountEosBalance(self, scope): return balance def getAccountCodeHash(self, account): - cmd="%s %s get code %s" % (Utils.EosClientPath, self.endpointArgs, account) + cmd="%s %s get code %s" % (Utils.EosClientPath, self.eosClientArgs(), account) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) try: retStr=Utils.checkOutput(cmd.split()) @@ -761,7 +812,7 @@ def getAccountCodeHash(self, account): # publish contract and return transaction as json object def publishContract(self, account, contractDir, wasmFile, abiFile, waitForTransBlock=False, shouldFail=False): - cmd="%s %s -v set contract -j %s %s" % (Utils.EosClientPath, self.endpointArgs, account, contractDir) + cmd="%s %s -v set contract -j %s %s" % (Utils.EosClientPath, self.eosClientArgs(), account, contractDir) cmd += "" if wasmFile is None else (" "+ wasmFile) cmd += "" if abiFile is None else (" " + abiFile) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) @@ -815,7 +866,7 @@ def getTableColumns(self, contract, scope, table): # returns tuple with transaction and def pushMessage(self, account, action, data, opts, silentErrors=False): - cmd="%s %s push action -j %s %s" % (Utils.EosClientPath, self.endpointArgs, account, action) + cmd="%s %s push action -j %s %s" % (Utils.EosClientPath, self.eosClientArgs(), account, action) cmdArr=cmd.split() if data is not None: cmdArr.append(data) @@ -835,7 +886,7 @@ def pushMessage(self, account, action, data, opts, silentErrors=False): def setPermission(self, account, code, pType, requirement, 
waitForTransBlock=False, exitOnError=False): cmdDesc="set action permission" cmd="%s -j %s %s %s %s" % (cmdDesc, account, code, pType, requirement) - trans=self.processCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError) + trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -848,7 +899,7 @@ def delegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, tran cmd="%s -j %s %s \"%s %s\" \"%s %s\" %s" % ( cmdDesc, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL, transferStr) msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name); - trans=self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -857,7 +908,7 @@ def regproducer(self, producer, url, location, waitForTransBlock=False, exitOnEr cmd="%s -j %s %s %s %s" % ( cmdDesc, producer.name, producer.activePublicKey, url, location) msg="producer=%s" % (producer.name); - trans=self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -866,13 +917,51 @@ def vote(self, account, producers, waitForTransBlock=False, exitOnError=False): cmd="%s -j %s %s" % ( cmdDesc, account.name, " ".join(producers)) msg="account=%s, producers=[ %s ]" % (account.name, ", ".join(producers)); - trans=self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) - def processCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): + def processCleosCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): + assert(isinstance(returnType, ReturnType)) + cmd="%s %s %s" % (Utils.EosClientPath, self.eosClientArgs(), cmd) + if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) + if exitMsg is not None: + exitMsg="Context: " + exitMsg + else: + exitMsg="" + trans=None + try: + if returnType==ReturnType.json: + trans=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors) + elif returnType==ReturnType.raw: + trans=Utils.runCmdReturnStr(cmd) + else: + unhandledEnumType(returnType) + except subprocess.CalledProcessError as ex: + if not silentErrors: + msg=ex.output.decode("utf-8") + errorMsg="Exception during \"%s\". Exception message: %s. 
%s" % (cmdDesc, msg, exitMsg) + if exitOnError: + Utils.cmdError(errorMsg) + Utils.errorExit(errorMsg) + else: + Utils.Print("ERROR: %s" % (errorMsg)) + return None + + if exitOnError and trans is None: + Utils.cmdError("could not \"%s\" - %s" % (cmdDesc,exitMsg)) + errorExit("Failed to \"%s\"" % (cmdDesc)) + + return trans + + def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): + assert(isinstance(producer, str)) + assert(isinstance(whereInSequence, int)) + assert(isinstance(blockType, BlockType)) assert(isinstance(returnType, ReturnType)) - cmd="%s %s %s" % (Utils.EosClientPath, self.endpointArgs, cmd) + basedOnLib="true" if blockType==BlockType.lib else "false" + cmd="curl %s/v1/test_control/kill_node_on_producer -d '{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":\"%s\" }' -X POST -H \"Content-Type: application/json\"" % \ + (self.endpointHttp, producer, whereInSequence, basedOnLib) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) trans=None try: @@ -880,10 +969,12 @@ def processCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, exitMsg trans=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors) elif returnType==ReturnType.raw: trans=Utils.runCmdReturnStr(cmd) + else: + unhandledEnumType(returnType) except subprocess.CalledProcessError as ex: if not silentErrors: msg=ex.output.decode("utf-8") - errorMsg="Exception during %s. %s" % (cmdDesc, msg) + errorMsg="Exception during \"%s\". %s" % (cmd, msg) if exitOnError: Utils.cmdError(errorMsg) Utils.errorExit(errorMsg) @@ -896,8 +987,8 @@ def processCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, exitMsg else: exitMsg="" if exitOnError and trans is None: - Utils.cmdError("could not %s - %s" % (cmdDesc,exitMsg)) - errorExit("Failed to %s" % (cmdDesc)) + Utils.cmdError("could not \"%s\" - %s" % (cmd,exitMsg)) + Utils.errorExit("Failed to \"%s\"" % (cmd)) return trans @@ -909,13 +1000,13 @@ def waitForTransBlockIfNeeded(self, trans, waitForTransBlock, exitOnError=False) if not self.waitForTransInBlock(transId): if exitOnError: Utils.cmdError("transaction with id %s never made it to a block" % (transId)) - errorExit("Failed to find transaction with id %s in a block before timeout" % (transId)) + Utils.errorExit("Failed to find transaction with id %s in a block before timeout" % (transId)) return None return trans def getInfo(self, silentErrors=False, exitOnError=False): cmdDesc = "get info" - info=self.processCmd(cmdDesc, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError) + info=self.processCleosCmd(cmdDesc, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError) if info is None: self.infoValid=False else: @@ -968,6 +1059,15 @@ def getIrreversibleBlockNum(self): return blockNum return None + def getBlockNum(self, blockType=BlockType.head): + assert isinstance(blockType, BlockType) + if blockType==BlockType.head: + return self.getHeadBlockNum() + elif blockType==BlockType.lib: + return self.getIrreversibleBlockNum() + else: + unhandledEnumType(blockType) + def kill(self, killSignal): if Utils.Debug: Utils.Print("Killing node: %s" % (self.cmd)) assert(self.pid is not None) @@ -1011,6 +1111,50 @@ def verifyAlive(self, silent=False): else: return True + def getBlockProducerByNum(self, blockNum, timeout=None, waitForBlock=True, exitOnError=True): + if waitForBlock: + self.waitForBlock(blockNum, timeout=timeout, blockType=BlockType.head) + block=self.getBlock(blockNum, 
exitOnError=exitOnError) + blockProducer=block["producer"] + if blockProducer is None and exitOnError: + Utils.cmdError("could not get producer for block number %s" % (blockNum)) + errorExit("Failed to get block's producer") + return blockProducer + + def getBlockProducer(self, timeout=None, waitForBlock=True, exitOnError=True, blockType=BlockType.head): + blockNum=self.getBlockNum(blockType=blockType) + block=self.getBlock(blockNum, exitOnError=exitOnError, blockType=blockType) + blockProducer=block["producer"] + if blockProducer is None and exitOnError: + Utils.cmdError("could not get producer for block number %s" % (blockNum)) + errorExit("Failed to get block's producer") + return blockProducer + + def getNextCleanProductionCycle(self, trans): + transId=Node.getTransId(trans) + rounds=21*12*2 # max time to ensure that at least 2/3+1 of producers x blocks per producer x at least 2 times + self.waitForTransFinalization(transId, timeout=rounds/2) + irreversibleBlockNum=self.getIrreversibleBlockNum() + + # The voted schedule should be promoted now, then need to wait for that to become irreversible + votingTallyWindow=120 #could be up to 120 blocks before the votes were tallied + promotedBlockNum=self.getHeadBlockNum()+votingTallyWindow + self.waitForIrreversibleBlock(promotedBlockNum, timeout=rounds/2) + + ibnSchedActive=self.getIrreversibleBlockNum() + + blockNum=self.getHeadBlockNum() + Utils.Print("Searching for clean production cycle blockNum=%s ibn=%s transId=%s promoted bn=%s ibn for schedule active=%s" % (blockNum,irreversibleBlockNum,transId,promotedBlockNum,ibnSchedActive)) + blockProducer=self.getBlockProducerByNum(blockNum) + blockNum+=1 + Utils.Print("Advance until the next block producer is retrieved") + while blockProducer == self.getBlockProducerByNum(blockNum): + blockNum+=1 + + blockProducer=self.getBlockProducerByNum(blockNum) + return blockNum + + # TBD: make nodeId an internal property # pylint: disable=too-many-locals def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None): @@ -1061,6 +1205,7 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim Utils.Print("cmd: %s" % (cmd)) popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr) self.pid=popen.pid + if Utils.Debug: Utils.Print("restart Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) def isNodeAlive(): """wait for node to be responsive.""" diff --git a/tests/TestHelper.py b/tests/TestHelper.py index 9270c1a75a1..6e00645e9dc 100644 --- a/tests/TestHelper.py +++ b/tests/TestHelper.py @@ -6,6 +6,22 @@ import argparse +class AppArgs: + def __init__(self): + self.args=[] + + class AppArg: + def __init__(self, flag, type, help, default, choices=None): + self.flag=flag + self.type=type + self.help=help + self.default=default + self.choices=choices + + def add(self, flag, type, help, default, choices=None): + arg=self.AppArg(flag, type, help, default, choices) + self.args.append(arg) + # pylint: disable=too-many-instance-attributes class TestHelper(object): LOCAL_HOST="localhost" @@ -14,10 +30,11 @@ class TestHelper(object): @staticmethod # pylint: disable=too-many-branches # pylint: disable=too-many-statements - def parse_args(includeArgs): + def parse_args(includeArgs, applicationSpecificArgs=AppArgs()): """Accepts set of arguments, builds argument parser and returns parse_args() output.""" assert(includeArgs) assert(isinstance(includeArgs, set)) + assert(isinstance(applicationSpecificArgs, AppArgs)) 
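    # (Editor's usage sketch, not part of the diff: a test script can extend the
    # common flags with its own, e.g.
    #     appArgs = AppArgs()
    #     appArgs.add("--wait-seconds", type=int, help="how long to wait", default=30)   # hypothetical flag
    #     args = TestHelper.parse_args({"-v", "--clean-run"}, applicationSpecificArgs=appArgs)
    # and the extra flags are parsed alongside the standard ones below.)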
parser = argparse.ArgumentParser(add_help=False) parser.add_argument('-?', action='help', default=argparse.SUPPRESS, @@ -82,6 +99,9 @@ def parse_args(includeArgs): if "--sanity-test" in includeArgs: parser.add_argument("--sanity-test", help="Validates nodeos and kleos are in path and can be started up.", action='store_true') + for arg in applicationSpecificArgs.args: + parser.add_argument(arg.flag, type=arg.type, help=arg.help, choices=arg.choices, default=arg.default) + args = parser.parse_args() return args @@ -122,6 +142,13 @@ def shutdown(cluster, walletMgr, testSuccessful=True, killEosInstances=True, kil if walletMgr: walletMgr.dumpErrorDetails() Utils.Print("== Errors see above ==") + if len(Utils.CheckOutputDeque)>0: + Utils.Print("== cout/cerr pairs from last %d calls to Utils. ==" % len(Utils.CheckOutputDeque)) + for out, err, cmd in reversed(Utils.CheckOutputDeque): + Utils.Print("cmd={%s}" % (" ".join(cmd))) + Utils.Print("cout={%s}" % (out)) + Utils.Print("cerr={%s}\n" % (err)) + Utils.Print("== cmd/cout/cerr pairs done. ==") if killEosInstances: Utils.Print("Shut down the cluster.") diff --git a/tests/WalletMgr.py b/tests/WalletMgr.py index 934e0258638..c46dd78d6fd 100644 --- a/tests/WalletMgr.py +++ b/tests/WalletMgr.py @@ -44,7 +44,7 @@ def launch(self): self.__walletPid=popen.pid # Give keosd time to warm up - time.sleep(1) + time.sleep(2) return True def create(self, name, accounts=None, exitOnError=True): @@ -53,15 +53,36 @@ def create(self, name, accounts=None, exitOnError=True): if Utils.Debug: Utils.Print("Wallet \"%s\" already exists. Returning same." % name) return wallet p = re.compile(r'\n\"(\w+)\"\n', re.MULTILINE) - cmd="%s %s wallet create --name %s --to-console" % (Utils.EosClientPath, self.endpointArgs, name) + cmdDesc="wallet create" + cmd="%s %s %s --name %s --to-console" % (Utils.EosClientPath, self.endpointArgs, cmdDesc, name) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) - retStr=Utils.checkOutput(cmd.split()) - #Utils.Print("create: %s" % (retStr)) + retStr=None + maxRetryCount=4 + retryCount=0 + while True: + try: + retStr=Utils.checkOutput(cmd.split()) + break + except subprocess.CalledProcessError as ex: + retryCount+=1 + if retryCount +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include + +#include + +#include +#include + +#include +#include + +#ifdef NON_VALIDATING_TEST +#define TESTER tester +#else +#define TESTER validating_tester +#endif + +using namespace eosio; +using namespace eosio::chain; +using namespace eosio::testing; +using namespace fc; + +BOOST_AUTO_TEST_SUITE(get_table_tests) + +BOOST_FIXTURE_TEST_CASE( get_scope_test, TESTER ) try { + produce_blocks(2); + + create_accounts({ N(eosio.token), N(eosio.ram), N(eosio.ramfee), N(eosio.stake), + N(eosio.bpay), N(eosio.vpay), N(eosio.saving), N(eosio.names) }); + + std::vector accs{N(inita), N(initb), N(initc), N(initd)}; + create_accounts(accs); + produce_block(); + + set_code( N(eosio.token), eosio_token_wast ); + set_abi( N(eosio.token), eosio_token_abi ); + produce_blocks(1); + + // create currency + auto act = mutable_variant_object() + ("issuer", "eosio") + ("maximum_supply", eosio::chain::asset::from_string("1000000000.0000 SYS")); + push_action(N(eosio.token), N(create), N(eosio.token), act ); + + // issue + for (account_name a: accs) { + push_action( N(eosio.token), N(issue), "eosio", mutable_variant_object() + ("to", name(a) ) + ("quantity", 
eosio::chain::asset::from_string("999.0000 SYS") ) + ("memo", "") + ); + } + produce_blocks(1); + + // iterate over scope + eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX)); + eosio::chain_apis::read_only::get_table_by_scope_params param{N(eosio.token), N(accounts), "inita", "", 10}; + eosio::chain_apis::read_only::get_table_by_scope_result result = plugin.read_only::get_table_by_scope(param); + + BOOST_REQUIRE_EQUAL(4, result.rows.size()); + BOOST_REQUIRE_EQUAL("", result.more); + if (result.rows.size() >= 4) { + BOOST_REQUIRE_EQUAL(name(N(eosio.token)), result.rows[0].code); + BOOST_REQUIRE_EQUAL(name(N(inita)), result.rows[0].scope); + BOOST_REQUIRE_EQUAL(name(N(accounts)), result.rows[0].table); + BOOST_REQUIRE_EQUAL(name(N(eosio)), result.rows[0].payer); + BOOST_REQUIRE_EQUAL(1, result.rows[0].count); + + BOOST_REQUIRE_EQUAL(name(N(initb)), result.rows[1].scope); + BOOST_REQUIRE_EQUAL(name(N(initc)), result.rows[2].scope); + BOOST_REQUIRE_EQUAL(name(N(initd)), result.rows[3].scope); + } + + param.lower_bound = "initb"; + param.upper_bound = "initd"; + result = plugin.read_only::get_table_by_scope(param); + BOOST_REQUIRE_EQUAL(2, result.rows.size()); + BOOST_REQUIRE_EQUAL("", result.more); + if (result.rows.size() >= 2) { + BOOST_REQUIRE_EQUAL(name(N(initb)), result.rows[0].scope); + BOOST_REQUIRE_EQUAL(name(N(initc)), result.rows[1].scope); + } + + param.limit = 1; + result = plugin.read_only::get_table_by_scope(param); + BOOST_REQUIRE_EQUAL(1, result.rows.size()); + BOOST_REQUIRE_EQUAL("initc", result.more); + + param.table = name(0); + result = plugin.read_only::get_table_by_scope(param); + BOOST_REQUIRE_EQUAL(1, result.rows.size()); + BOOST_REQUIRE_EQUAL("initc", result.more); + + param.table = N(invalid); + result = plugin.read_only::get_table_by_scope(param); + BOOST_REQUIRE_EQUAL(0, result.rows.size()); + BOOST_REQUIRE_EQUAL("", result.more); + +} FC_LOG_AND_RETHROW() /// get_scope_test + +BOOST_AUTO_TEST_SUITE_END() + diff --git a/tests/launcher_test.py b/tests/launcher_test.py new file mode 100755 index 00000000000..4db21658aa8 --- /dev/null +++ b/tests/launcher_test.py @@ -0,0 +1,249 @@ +#!/usr/bin/env python3 + +from testUtils import Utils +from Cluster import Cluster +from WalletMgr import WalletMgr +from Node import Node +from TestHelper import TestHelper + +import decimal +import re + +############################################################### +# nodeos_run_test +# --dump-error-details +# --keep-logs +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit +cmdError=Utils.cmdError +from core_symbol import CORE_SYMBOL + +args = TestHelper.parse_args({"--defproducera_prvt_key","--dump-error-details","--dont-launch","--keep-logs", + "-v","--leave-running","--clean-run","--p2p-plugin"}) +debug=args.v +defproduceraPrvtKey=args.defproducera_prvt_key +dumpErrorDetails=args.dump_error_details +keepLogs=args.keep_logs +dontLaunch=args.dont_launch +dontKill=args.leave_running +killAll=args.clean_run +p2pPlugin=args.p2p_plugin + +Utils.Debug=debug +cluster=Cluster(walletd=True, defproduceraPrvtKey=defproduceraPrvtKey) +walletMgr=WalletMgr(True) +testSuccessful=False +killEosInstances=not dontKill +killWallet=not dontKill + +WalletdName="keosd" +ClientName="cleos" +timeout = .5 * 12 * 2 + 60 # time for finalization with 1 producer + 60 seconds padding +Utils.setIrreversibleTimeout(timeout) + +try: + TestHelper.printSystemInfo("BEGIN") + + walletMgr.killall(allInstances=killAll) + 
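    # (Editor's aside on the get_table_by_scope unit test earlier in this diff,
    # a hedged sketch rather than confirmed API usage: the same read-only query
    # is exposed by the chain plugin over HTTP, roughly
    #     curl -X POST http://HOST:PORT/v1/chain/get_table_by_scope \
    #          -d '{"code":"eosio.token","table":"accounts","lower_bound":"inita","limit":10}'
    # returning one row per scope with code/scope/table/payer/count, plus "more"
    # when the result is truncated.)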
walletMgr.cleanup() + + if not dontLaunch: + cluster.killall(allInstances=killAll) + cluster.cleanup() + Print("Stand up cluster") + if cluster.launch(pnodes=4, dontKill=dontKill, p2pPlugin=p2pPlugin) is False: + cmdError("launcher") + errorExit("Failed to stand up eos cluster.") + else: + cluster.initializeNodes(defproduceraPrvtKey=defproduceraPrvtKey) + killEosInstances=False + + Print("Validating system accounts after bootstrap") + cluster.validateAccounts(None) + + accounts=Cluster.createAccountKeys(3) + if accounts is None: + errorExit("FAILURE - create keys") + testeraAccount=accounts[0] + testeraAccount.name="testera11111" + currencyAccount=accounts[1] + currencyAccount.name="currency1111" + exchangeAccount=accounts[2] + exchangeAccount.name="exchange1111" + + PRV_KEY1=testeraAccount.ownerPrivateKey + PUB_KEY1=testeraAccount.ownerPublicKey + PRV_KEY2=currencyAccount.ownerPrivateKey + PUB_KEY2=currencyAccount.ownerPublicKey + PRV_KEY3=exchangeAccount.activePrivateKey + PUB_KEY3=exchangeAccount.activePublicKey + + testeraAccount.activePrivateKey=currencyAccount.activePrivateKey=PRV_KEY3 + testeraAccount.activePublicKey=currencyAccount.activePublicKey=PUB_KEY3 + + exchangeAccount.ownerPrivateKey=PRV_KEY2 + exchangeAccount.ownerPublicKey=PUB_KEY2 + + Print("Stand up %s" % (WalletdName)) + walletMgr.killall(allInstances=killAll) + walletMgr.cleanup() + if walletMgr.launch() is False: + cmdError("%s" % (WalletdName)) + errorExit("Failed to stand up eos walletd.") + + testWalletName="test" + Print("Creating wallet \"%s\"." % (testWalletName)) + testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,cluster.defproduceraAccount]) + + Print("Wallet \"%s\" password=%s." % (testWalletName, testWallet.password.encode("utf-8"))) + + for account in accounts: + Print("Importing keys for account %s into wallet %s." % (account.name, testWallet.name)) + if not walletMgr.importKey(account, testWallet): + cmdError("%s wallet import" % (ClientName)) + errorExit("Failed to import key for account %s" % (account.name)) + + defproduceraWalletName="defproducera" + Print("Creating wallet \"%s\"." % (defproduceraWalletName)) + defproduceraWallet=walletMgr.create(defproduceraWalletName) + + Print("Wallet \"%s\" password=%s." % (defproduceraWalletName, defproduceraWallet.password.encode("utf-8"))) + + defproduceraAccount=cluster.defproduceraAccount + + Print("Importing keys for account %s into wallet %s." 
% (defproduceraAccount.name, defproduceraWallet.name))
+    if not walletMgr.importKey(defproduceraAccount, defproduceraWallet):
+        cmdError("%s wallet import" % (ClientName))
+        errorExit("Failed to import key for account %s" % (defproduceraAccount.name))
+
+    node=cluster.getNode(0)
+
+    Print("Validating accounts before user accounts creation")
+    cluster.validateAccounts(None)
+
+    # create accounts via eosio as otherwise a bid is needed
+    Print("Create new account %s via %s" % (testeraAccount.name, cluster.eosioAccount.name))
+    transId=node.createInitializeAccount(testeraAccount, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, exitOnError=True)
+
+    Print("Create new account %s via %s" % (currencyAccount.name, cluster.eosioAccount.name))
+    transId=node.createInitializeAccount(currencyAccount, cluster.eosioAccount, buyRAM=1000000, stakedDeposit=5000, exitOnError=True)
+
+    Print("Create new account %s via %s" % (exchangeAccount.name, cluster.eosioAccount.name))
+    transId=node.createInitializeAccount(exchangeAccount, cluster.eosioAccount, buyRAM=1000000, waitForTransBlock=True, exitOnError=True)
+
+    Print("Validating accounts after user accounts creation")
+    accounts=[testeraAccount, currencyAccount, exchangeAccount]
+    cluster.validateAccounts(accounts)
+
+    Print("Verify account %s" % (testeraAccount))
+    if not node.verifyAccount(testeraAccount):
+        errorExit("FAILURE - account creation failed.", raw=True)
+
+    transferAmount="97.5321 {0}".format(CORE_SYMBOL)
+    Print("Transfer funds %s from account %s to %s" % (transferAmount, defproduceraAccount.name, testeraAccount.name))
+    node.transferFunds(defproduceraAccount, testeraAccount, transferAmount, "test transfer")
+
+    expectedAmount=transferAmount
+    Print("Verify transfer, Expected: %s" % (expectedAmount))
+    actualAmount=node.getAccountEosBalanceStr(testeraAccount.name)
+    if expectedAmount != actualAmount:
+        cmdError("FAILURE - transfer failed")
+        errorExit("Transfer verification failed. Expected %s, actual: %s" % (expectedAmount, actualAmount))
+
+    transferAmount="0.0100 {0}".format(CORE_SYMBOL)
+    Print("Force transfer funds %s from account %s to %s" % (
+        transferAmount, defproduceraAccount.name, testeraAccount.name))
+    node.transferFunds(defproduceraAccount, testeraAccount, transferAmount, "test transfer", force=True)
+
+    expectedAmount="97.5421 {0}".format(CORE_SYMBOL)
+    Print("Verify transfer, Expected: %s" % (expectedAmount))
+    actualAmount=node.getAccountEosBalanceStr(testeraAccount.name)
+    if expectedAmount != actualAmount:
+        cmdError("FAILURE - transfer failed")
+        errorExit("Transfer verification failed. Expected %s, actual: %s" % (expectedAmount, actualAmount))
+
+    Print("Validating accounts after some user transactions")
+    accounts=[testeraAccount, currencyAccount, exchangeAccount]
+    cluster.validateAccounts(accounts)
+
+    transferAmount="97.5311 {0}".format(CORE_SYMBOL)
+    Print("Transfer funds %s from account %s to %s" % (
+        transferAmount, testeraAccount.name, currencyAccount.name))
+    trans=node.transferFunds(testeraAccount, currencyAccount, transferAmount, "test transfer a->b")
+    transId=Node.getTransId(trans)
+
+    expectedAmount="98.0311 {0}".format(CORE_SYMBOL) # 5000 initial deposit
+    Print("Verify transfer, Expected: %s" % (expectedAmount))
+    actualAmount=node.getAccountEosBalanceStr(currencyAccount.name)
+    if expectedAmount != actualAmount:
+        cmdError("FAILURE - transfer failed")
+        errorExit("Transfer verification failed. Expected %s, actual: %s" % (expectedAmount, actualAmount))
+
+    Print("Validate last action for account %s" % (testeraAccount.name))
+    actions=node.getActions(testeraAccount, -1, -1, exitOnError=True)
+    try:
+        assert(actions["actions"][0]["action_trace"]["act"]["name"] == "transfer")
+    except (AssertionError, TypeError, KeyError) as _:
+        Print("Action validation failed. Actions: %s" % (actions))
+        raise
+
+    node.waitForTransInBlock(transId)
+
+    transaction=node.getTransaction(trans, exitOnError=True, delayedRetry=False)
+
+    typeVal=None
+    amountVal=None
+    key=""
+    try:
+        key="[traces][0][act][name]"
+        typeVal=transaction["traces"][0]["act"]["name"]
+        key="[traces][0][act][data][quantity]"
+        amountVal=transaction["traces"][0]["act"]["data"]["quantity"]
+        amountVal=int(decimal.Decimal(amountVal.split()[0])*10000)
+    except (TypeError, KeyError) as e:
+        Print("transaction%s not found. Transaction: %s" % (key, transaction))
+        raise
+
+    if typeVal != "transfer" or amountVal != 975311:
+        errorExit("FAILURE - get transaction trans_id failed: %s %s %s" % (transId, typeVal, amountVal), raw=True)
+
+    Print("Bouncing nodes #00 and #01")
+    if cluster.bounce("00,01") is False:
+        cmdError("launcher bounce")
+        errorExit("Failed to bounce eos node.")
+
+    Print("Taking down node #02")
+    if cluster.down("02") is False:
+        cmdError("launcher down command")
+        errorExit("Failed to take down eos node.")
+
+    Print("Using bounce option to re-launch node #02")
+    if cluster.bounce("02") is False:
+        cmdError("launcher bounce")
+        errorExit("Failed to bounce eos node.")
+
+    p = re.compile('Assert')
+    errFileName="var/lib/node_00/stderr.txt"
+    assertionsFound=False
+    with open(errFileName) as errFile:
+        for line in errFile:
+            if p.search(line):
+                assertionsFound=True
+
+    if assertionsFound:
+        # Too many assertion logs, hard to validate how many are genuine. Make this a warning
+        # for now, hopefully the logs will get cleaned up in future.
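        # (Editor's note, not part of the test: the bounce/down/bounce sequence
        # above exercises Cluster.bounce and Cluster.down from earlier in this
        # diff, which shell out to the launcher as roughly
        #     launcher ... --bounce 00,01    # restart nodes 00 and 01
        #     launcher ... --down 02         # stop node 02
        # where the ids map to the var/lib/node_XX data directories.)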
+ Print("WARNING: Asserts in var/lib/node_00/stderr.txt") + #errorExit("FAILURE - Assert in var/lib/node_00/stderr.txt") + + Print("Validating accounts at end of test") + accounts=[testeraAccount, currencyAccount, exchangeAccount] + cluster.validateAccounts(accounts) + + testSuccessful=True +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + +exit(0) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py new file mode 100755 index 00000000000..4c4105721d5 --- /dev/null +++ b/tests/nodeos_forked_chain_test.py @@ -0,0 +1,412 @@ +#!/usr/bin/env python3 + +from testUtils import Utils +import testUtils +import time +from Cluster import Cluster +from WalletMgr import WalletMgr +from Node import BlockType +from Node import Node +from TestHelper import AppArgs +from TestHelper import TestHelper + +import decimal +import math +import re +import signal + +############################################################### +# nodeos_forked_chain_test +# --dump-error-details +# --keep-logs +############################################################### +Print=Utils.Print + +from core_symbol import CORE_SYMBOL + +def analyzeBPs(bps0, bps1, expectDivergence): + start=0 + index=None + length=len(bps0) + firstDivergence=None + errorInDivergence=False + while start < length: + bpsStr=None + for i in range(start,length): + bp0=bps0[i] + bp1=bps1[i] + if bpsStr is None: + bpsStr="" + else: + bpsStr+=", " + blockNum0=bp0["blockNum"] + prod0=bp0["prod"] + blockNum1=bp1["blockNum"] + prod1=bp1["prod"] + numDiff=True if blockNum0!=blockNum1 else False + prodDiff=True if prod0!=prod1 else False + if numDiff or prodDiff: + index=i + if firstDivergence is None: + firstDivergence=min(blockNum0, blockNum1) + if not expectDivergence: + errorInDivergence=True + break + bpsStr+=str(blockNum0)+"->"+prod0 + + if index is None: + return + + bpsStr0=None + bpsStr2=None + start=length + for i in range(index,length): + if bpsStr0 is None: + bpsStr0="" + bpsStr1="" + else: + bpsStr0+=", " + bpsStr1+=", " + bp0=bps0[i] + bp1=bps1[i] + blockNum0=bp0["blockNum"] + prod0=bp0["prod"] + blockNum1=bp1["blockNum"] + prod1=bp1["prod"] + numDiff="*" if blockNum0!=blockNum1 else "" + prodDiff="*" if prod0!=prod1 else "" + if not numDiff and not prodDiff: + start=i + index=None + if expectDivergence: + errorInDivergence=True + break + bpsStr0+=str(blockNum0)+numDiff+"->"+prod0+prodDiff + bpsStr1+=str(blockNum1)+numDiff+"->"+prod1+prodDiff + if errorInDivergence: + msg="Failed analyzing block producers - " + if expectDivergence: + msg+="nodes indicate different block producers for the same blocks, but did not expect them to diverge." + else: + msg+="did not expect nodes to indicate different block producers for the same blocks." 
+ msg+="\n Matching Blocks= %s \n Diverging branch node0= %s \n Diverging branch node1= %s" % (bpsStr,bpsStr0,bpsStr1) + Utils.errorExit(msg) + return firstDivergence + +def getMinHeadAndLib(prodNodes): + info0=prodNodes[0].getInfo(exitOnError=True) + info1=prodNodes[1].getInfo(exitOnError=True) + headBlockNum=min(int(info0["head_block_num"]),int(info1["head_block_num"])) + libNum=min(int(info0["last_irreversible_block_num"]), int(info1["last_irreversible_block_num"])) + return (headBlockNum, libNum) + + + +args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--p2p-plugin"}) +Utils.Debug=args.v +totalProducerNodes=2 +totalNonProducerNodes=1 +totalNodes=totalProducerNodes+totalNonProducerNodes +maxActiveProducers=21 +totalProducers=maxActiveProducers +cluster=Cluster(walletd=True) +dumpErrorDetails=args.dump_error_details +keepLogs=args.keep_logs +dontKill=args.leave_running +prodCount=args.prod_count +killAll=args.clean_run +p2pPlugin=args.p2p_plugin + +walletMgr=WalletMgr(True) +testSuccessful=False +killEosInstances=not dontKill +killWallet=not dontKill + +WalletdName="keosd" +ClientName="cleos" + +try: + TestHelper.printSystemInfo("BEGIN") + + cluster.killall(allInstances=killAll) + cluster.cleanup() + Print("Stand up cluster") + specificExtraNodeosArgs={} + # producer nodes will be mapped to 0 through totalProducerNodes-1, so the number totalProducerNodes will be the non-producing node + specificExtraNodeosArgs[totalProducerNodes]="--plugin eosio::test_control_api_plugin" + + + # *** setup topogrophy *** + + # "bridge" shape connects defprocera through defproducerk (in node0) to each other and defproducerl through defproduceru (in node01) + # and the only connection between those 2 groups is through the bridge node + + if cluster.launch(prodCount=prodCount, onlyBios=False, dontKill=dontKill, topo="bridge", pnodes=totalProducerNodes, + totalNodes=totalNodes, totalProducers=totalProducers, p2pPlugin=p2pPlugin, + useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs) is False: + Utils.cmdError("launcher") + Utils.errorExit("Failed to stand up eos cluster.") + Print("Validating system accounts after bootstrap") + cluster.validateAccounts(None) + + + # *** create accounts to vote in desired producers *** + + accounts=cluster.createAccountKeys(5) + if accounts is None: + Utils.errorExit("FAILURE - create keys") + accounts[0].name="tester111111" + accounts[1].name="tester222222" + accounts[2].name="tester333333" + accounts[3].name="tester444444" + accounts[4].name="tester555555" + + testWalletName="test" + + Print("Creating wallet \"%s\"." % (testWalletName)) + walletMgr.killall(allInstances=killAll) + walletMgr.cleanup() + if walletMgr.launch() is False: + Utils.cmdError("%s" % (WalletdName)) + Utils.errorExit("Failed to stand up eos walletd.") + + testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4]]) + + for _, account in cluster.defProducerAccounts.items(): + walletMgr.importKey(account, testWallet, ignoreDupKeyWarning=True) + + Print("Wallet \"%s\" password=%s." 
% (testWalletName, testWallet.password.encode("utf-8")))
+
+
+    # *** identify each node (producers and non-producing node) ***
+
+    nonProdNode=None
+    prodNodes=[]
+    producers=[]
+    for i in range(0, totalNodes):
+        node=cluster.getNode(i)
+        node.producers=Cluster.parseProducers(i)
+        numProducers=len(node.producers)
+        Print("node has producers=%s" % (node.producers))
+        if numProducers==0:
+            if nonProdNode is None:
+                nonProdNode=node
+                nonProdNode.nodeNum=i
+            else:
+                Utils.errorExit("More than one non-producing node")
+        else:
+            for prod in node.producers:
+                trans=node.regproducer(cluster.defProducerAccounts[prod], "http://mysite.com", 0, waitForTransBlock=False, exitOnError=True)
+
+            prodNodes.append(node)
+            producers.extend(node.producers)
+
+
+    # *** delegate bandwidth to accounts ***
+
+    node=prodNodes[0]
+    # create accounts via eosio as otherwise a bid is needed
+    for account in accounts:
+        Print("Create new account %s via %s" % (account.name, cluster.eosioAccount.name))
+        trans=node.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True)
+        transferAmount="100000000.0000 {0}".format(CORE_SYMBOL)
+        Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name))
+        node.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer")
+        trans=node.delegatebw(account, 20000000.0000, 20000000.0000, exitOnError=True)
+
+
+    # *** vote using accounts ***
+
+    #verify nodes are in sync and advancing
+    cluster.waitOnClusterSync(blockAdvancing=5)
+    index=0
+    for account in accounts:
+        Print("Vote for producers=%s" % (producers))
+        trans=prodNodes[index % len(prodNodes)].vote(account, producers)
+        index+=1
+
+
+    # *** Identify a block where production is stable ***
+
+    #verify nodes are in sync and advancing
+    cluster.waitOnClusterSync(blockAdvancing=5)
+    blockNum=node.getNextCleanProductionCycle(trans)
+    blockProducer=node.getBlockProducerByNum(blockNum)
+    Print("Validating blockNum=%s, producer=%s" % (blockNum, blockProducer))
+    cluster.biosNode.kill(signal.SIGTERM)
+
+    #advance to the next block of 12
+    lastBlockProducer=blockProducer
+    while blockProducer==lastBlockProducer:
+        blockNum+=1
+        blockProducer=node.getBlockProducerByNum(blockNum)
+
+
+    # *** Identify what the production cycle is ***
+
+    productionCycle=[]
+    producerToSlot={}
+    slot=-1
+    inRowCountPerProducer=12
+    while True:
+        if blockProducer not in producers:
+            Utils.errorExit("Producer %s was not one of the voted on producers" % blockProducer)
+
+        productionCycle.append(blockProducer)
+        slot+=1
+        if blockProducer in producerToSlot:
+            Utils.errorExit("Producer %s was first seen in slot %d, but is repeated in slot %d" % (blockProducer, producerToSlot[blockProducer]["slot"], slot))
+
+        producerToSlot[blockProducer]={"slot":slot, "count":0}
+        lastBlockProducer=blockProducer
+        while blockProducer==lastBlockProducer:
+            producerToSlot[blockProducer]["count"]+=1
+            blockNum+=1
+            blockProducer=node.getBlockProducerByNum(blockNum)
+
+        if producerToSlot[lastBlockProducer]["count"]!=inRowCountPerProducer:
+            Utils.errorExit("Producer %s, in slot %d, expected to produce %d blocks but produced %d blocks" % (lastBlockProducer, producerToSlot[lastBlockProducer]["slot"], inRowCountPerProducer, producerToSlot[lastBlockProducer]["count"]))
+
+        if blockProducer==productionCycle[0]:
+            break
+
+    output=None
+    for blockProducer in productionCycle:
+        if output is None:
+            output=""
+        else:
+            output+=", "
+        output+=blockProducer+":"+str(producerToSlot[blockProducer]["count"])
+    Print("ProductionCycle ->> {\n%s\n}" % output)
+
+    #retrieve the info for all the nodes to report the status for each
+    for node in cluster.getNodes():
+        node.getInfo()
+    cluster.reportStatus()
+
+
+    # *** Killing the "bridge" node ***
+
+    Print("Sending command to kill \"bridge\" node to separate the 2 producer groups.")
+    # block number to start expecting node killed after
+    preKillBlockNum=nonProdNode.getBlockNum()
+    preKillBlockProducer=nonProdNode.getBlockProducerByNum(preKillBlockNum)
+    # kill at last block before defproducerl, since the block it is killed on will get propagated
+    killAtProducer="defproducerk"
+    nonProdNode.killNodeOnProducer(producer=killAtProducer, whereInSequence=(inRowCountPerProducer-1))
+
+
+    # *** Identify the highest block number to check while we are trying to identify where the divergence will occur ***
+
+    # will search a full cycle after the current block, since we don't know how many blocks were produced since retrieving
+    # the block number and issuing the kill command
+    postKillBlockNum=prodNodes[1].getBlockNum()
+    blockProducers0=[]
+    blockProducers1=[]
+    libs0=[]
+    libs1=[]
+    lastBlockNum=max([preKillBlockNum,postKillBlockNum])+maxActiveProducers*inRowCountPerProducer
+    actualLastBlockNum=None
+    prodChanged=False
+    nextProdChange=False
+    #identify the earliest LIB to start identifying the earliest block to check if divergent branches eventually reach consensus
+    (headBlockNum, libNumAroundDivergence)=getMinHeadAndLib(prodNodes)
+    for blockNum in range(preKillBlockNum,lastBlockNum):
+        #avoid getting LIB until my current block passes the head from the last time I checked
+        if blockNum>headBlockNum:
+            (headBlockNum, libNumAroundDivergence)=getMinHeadAndLib(prodNodes)
+
+        # track the block number and producer from each producing node
+        blockProducer0=prodNodes[0].getBlockProducerByNum(blockNum)
+        blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum)
+        blockProducers0.append({"blockNum":blockNum, "prod":blockProducer0})
+        blockProducers1.append({"blockNum":blockNum, "prod":blockProducer1})
+
+        #in the case that the preKillBlockNum was also produced by killAtProducer, ensure that we have
+        #at least one producer transition before checking for killAtProducer
+        if not prodChanged:
+            if preKillBlockProducer!=blockProducer0:
+                prodChanged=True
+
+        #since it is killing for the last block of killAtProducer, we look for the next producer change
+        if not nextProdChange and prodChanged and blockProducer1==killAtProducer:
+            nextProdChange=True
+        elif nextProdChange and blockProducer1!=killAtProducer:
+            actualLastBlockNum=blockNum
+            break
+
+        #if we diverge before identifying the actualLastBlockNum, then there is an ERROR
+        if blockProducer0!=blockProducer1:
+            Utils.errorExit("Groups reported different block producers for block number %d. %s != %s." % (blockNum,blockProducer0,blockProducer1))
+
+
+    # *** Analyze the producers leading up to the block after killing the non-producing node ***
+
+    firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=True)
+    # Nodes should not have diverged till the last block
+    if firstDivergence!=blockNum:
+        Utils.errorExit("Expected to diverge at %s, but diverged at %s." % (firstDivergence, blockNum))
+    blockProducers0=[]
+    blockProducers1=[]
+
+    #verify that the non producing node is not alive (and populate the producer nodes with current getInfo data to report if
+    #an error occurs)
+    if nonProdNode.verifyAlive():
+        Utils.errorExit("Expected the non-producing node to have shut down.")
+    for prodNode in prodNodes:
+        prodNode.getInfo()
+
+
+    # *** Track the blocks from the divergence till there are 10*12 blocks on one chain and 10*12+1 on the other ***
+
+    killBlockNum=blockNum
+    lastBlockNum=killBlockNum+(maxActiveProducers - 1)*inRowCountPerProducer+1 # allow 1st testnet group to produce just 1 more block than the 2nd
+    for blockNum in range(killBlockNum,lastBlockNum):
+        blockProducer0=prodNodes[0].getBlockProducerByNum(blockNum)
+        blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum)
+        blockProducers0.append({"blockNum":blockNum, "prod":blockProducer0})
+        blockProducers1.append({"blockNum":blockNum, "prod":blockProducer1})
+
+
+    # *** Analyze the producers from the divergence to the lastBlockNum and verify they stay diverged ***
+
+    firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=True)
+    if firstDivergence!=killBlockNum:
+        Utils.errorExit("Expected to diverge at %s, but diverged at %s." % (firstDivergence, killBlockNum))
+    blockProducers0=[]
+    blockProducers1=[]
+
+
+    # *** Relaunch the non-producing bridge node to connect the producing nodes again ***
+
+    if not nonProdNode.relaunch(nonProdNode.nodeNum, None):
+        Utils.errorExit("Failure - (non-production) node %d should have restarted" % (nonProdNode.nodeNum))
+
+
+    # *** Identify the producers from the saved LIB to the current highest head ***
+
+    #ensure that the nodes have enough time to get in consensus, so wait for 3 producers to produce their complete round
+    time.sleep(inRowCountPerProducer * 3 / 2)
+
+    # ensure all blocks from the lib before divergence till the current head are now in consensus
+    endBlockNum=max(prodNodes[0].getBlockNum(), prodNodes[1].getBlockNum())
+
+    for blockNum in range(libNumAroundDivergence,endBlockNum):
+        blockProducer0=prodNodes[0].getBlockProducerByNum(blockNum)
+        blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum)
+        blockProducers0.append({"blockNum":blockNum, "prod":blockProducer0})
+        blockProducers1.append({"blockNum":blockNum, "prod":blockProducer1})
+
+
+    # *** Analyze the producers from the saved LIB to the current highest head and verify they match now ***
+
+    analyzeBPs(blockProducers0, blockProducers1, expectDivergence=False)
+
+    blockProducers0=[]
+    blockProducers1=[]
+
+    testSuccessful=True
+finally:
+    TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails)
+
+exit(0)
diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py
index 85be1bd940b..f28f62a730a 100755
--- a/tests/nodeos_run_test.py
+++ b/tests/nodeos_run_test.py
@@ -206,10 +206,10 @@
     transId=node.createInitializeAccount(testeraAccount, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, exitOnError=True)

     Print("Create new account %s via %s" % (currencyAccount.name, cluster.eosioAccount.name))
-    transId=node.createInitializeAccount(currencyAccount, cluster.eosioAccount, stakedDeposit=5000, exitOnError=True)
+    transId=node.createInitializeAccount(currencyAccount, cluster.eosioAccount, buyRAM=1000000, stakedDeposit=5000, exitOnError=True)

     Print("Create new account %s via %s" % (exchangeAccount.name, cluster.eosioAccount.name))
-
transId=node.createInitializeAccount(exchangeAccount, cluster.eosioAccount, waitForTransBlock=True, exitOnError=True) + transId=node.createInitializeAccount(exchangeAccount, cluster.eosioAccount, buyRAM=1000000, waitForTransBlock=True, exitOnError=True) Print("Validating accounts after user accounts creation") accounts=[testeraAccount, currencyAccount, exchangeAccount] @@ -282,7 +282,7 @@ node.waitForTransInBlock(transId) - transaction=node.getTransaction(transId, exitOnError=True, delayedRetry=False) + transaction=node.getTransaction(trans, exitOnError=True, delayedRetry=False) typeVal=None amountVal=None @@ -467,7 +467,7 @@ raise Print("Test for block decoded packed transaction (issue 2932)") - blockId=node.getBlockIdByTransId(transId) + blockId=node.getBlockIdByTransId(trans[1]) assert(blockId) block=node.getBlock(blockId, exitOnError=True) @@ -601,18 +601,6 @@ errorExit("Failed to lock wallet %s" % (defproduceraWallet.name)) - Print("Exchange Contract Tests") - Print("upload exchange contract") - - contractDir="contracts/exchange" - wasmFile="exchange.wasm" - abiFile="exchange.abi" - Print("Publish exchange contract") - trans=node.publishContract(exchangeAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) - if trans is None: - cmdError("%s set contract exchange" % (ClientName)) - errorExit("Failed to publish contract.") - contractDir="contracts/simpledb" wasmFile="simpledb.wasm" abiFile="simpledb.abi" @@ -622,7 +610,7 @@ if retMap is None: errorExit("Failed to publish, but should have returned a details map") if retMap["returncode"] == 0 or retMap["returncode"] == 139: # 139 SIGSEGV - errorExit("FAILURE - set contract exchange failed", raw=True) + errorExit("FAILURE - set contract simpledb failed", raw=True) else: Print("Test successful, %s returned error code: %d" % (ClientName, retMap["returncode"])) diff --git a/tests/nodeos_under_min_avail_ram.py b/tests/nodeos_under_min_avail_ram.py index 2fe356f68b5..c615b4fbb38 100755 --- a/tests/nodeos_under_min_avail_ram.py +++ b/tests/nodeos_under_min_avail_ram.py @@ -13,6 +13,9 @@ import math import re +Print=Utils.Print +errorExit=Utils.errorExit + class NamedAccounts: def __init__(self, cluster, numAccounts): @@ -50,8 +53,6 @@ def setName(self, num): # --dump-error-details # --keep-logs ############################################################### -Print=Utils.Print -errorExit=Utils.errorExit args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run"}) Utils.Debug=args.v @@ -81,7 +82,7 @@ def setName(self, num): maxRAMFlag="--chain-state-db-size-mb" maxRAMValue=1010 extraNodeosArgs=" %s %d %s %d " % (minRAMFlag, minRAMValue, maxRAMFlag, maxRAMValue) - if cluster.launch(onlyBios=False, dontKill=dontKill, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes, extraNodeosArgs=extraNodeosArgs) is False: + if cluster.launch(onlyBios=False, dontKill=dontKill, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes, extraNodeosArgs=extraNodeosArgs, useBiosBootFile=False) is False: Utils.cmdError("launcher") errorExit("Failed to stand up eos cluster.") @@ -143,7 +144,7 @@ def setName(self, num): Print("Publish contract") trans=nodes[0].publishContract(contractAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) if trans is None: - cmdError("%s set contract %s" % (ClientName, contractAccount.name)) + Utils.cmdError("%s set contract %s" % (ClientName, contractAccount.name)) errorExit("Failed to publish contract.") contract=contractAccount.name 
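(Editor's aside before the nodeos_voting_test.py hunks below; not part of the diff. Several of these tests share the same schedule arithmetic: 21 active producers, 12 consecutive blocks per producer, and one block every 0.5 seconds. The constants in getNextCleanProductionCycle follow directly from it; the variable names in this sketch are illustrative only.)

    # Python sketch of the schedule arithmetic used by these tests.
    producers = 21                   # maxActiveProducers
    blocks_per_producer = 12         # inRowCountPerProducer
    rounds = producers * blocks_per_producer * 2    # 504 blocks: two full schedule rotations
    seconds_per_block = 0.5          # nodeos block interval
    timeout = rounds * seconds_per_block            # 252.0 seconds == rounds/2, the timeout used above
    assert timeout == rounds / 2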
diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py
index df9bbb689f4..b6f176af8c9 100755
--- a/tests/nodeos_voting_test.py
+++ b/tests/nodeos_voting_test.py
@@ -25,83 +25,39 @@ def populate(node, num):
         ProducerToNode.map[prod]=num
         Utils.Print("Producer=%s for nodeNum=%s" % (prod,num))
 
-def vote(node, account, producers):
-    Print("Votes for %s" % (account.name))
-    trans=node.vote(account, producers, waitForTransBlock=False, exitOnError=True)
-    return trans
-
-def getBlockProducer(node, blockNum):
-    node.waitForBlock(blockNum)
-    block=node.getBlock(blockNum, exitOnError=True)
-    blockProducer=block["producer"]
-    if blockProducer is None:
-        Utils.cmdError("could not get producer for block number %s" % (blockNum))
-        errorExit("Failed to get block's producer")
-    return blockProducer
-
-def getNodeNum(cluster, node):
-    for i in range(0, 4):
-        if node == cluster.getNode(i):
-            return i
-    return -1
-
 def isValidBlockProducer(prodsActive, blockNum, node):
-    blockProducer=getBlockProducer(node, blockNum)
+    blockProducer=node.getBlockProducerByNum(blockNum)
     if blockProducer not in prodsActive:
         return False
     return prodsActive[blockProducer]
 
 def validBlockProducer(prodsActive, prodsSeen, blockNum, node):
-    blockProducer=getBlockProducer(node, blockNum)
+    blockProducer=node.getBlockProducerByNum(blockNum)
     if blockProducer not in prodsActive:
         Utils.cmdError("unexpected block producer %s at blockNum=%s" % (blockProducer,blockNum))
-        errorExit("Failed because of invalid block producer")
+        Utils.errorExit("Failed because of invalid block producer")
     if not prodsActive[blockProducer]:
         Utils.cmdError("block producer %s for blockNum=%s not elected, belongs to node %s" % (blockProducer, blockNum, ProducerToNode.map[blockProducer]))
-        errorExit("Failed because of incorrect block producer")
+        Utils.errorExit("Failed because of incorrect block producer")
     prodsSeen[blockProducer]=True
 
-def getNextCleanProductionCycle(trans, node):
-    transId=Node.getTransId(trans)
-    rounds=21*12*2  # max time to ensure that at least 2/3+1 of producers x blocks per producer x at least 2 times
-    node.waitForTransFinalization(transId, timeout=rounds/2)
-    irreversibleBlockNum=node.getIrreversibleBlockNum()
-
-    # The voted schedule should be promoted now, then need to wait for that to become irreversible
-    votingTallyWindow=120  #could be up to 120 blocks before the votes were tallied
-    promotedBlockNum=node.getHeadBlockNum()+votingTallyWindow
-    node.waitForIrreversibleBlock(promotedBlockNum, timeout=rounds/2)
-
-    ibnSchedActive=node.getIrreversibleBlockNum()
-
-    blockNum=node.getHeadBlockNum()
-    Utils.Print("Searching for clean production cycle blockNum=%s ibn=%s transId=%s promoted bn=%s ibn for schedule active=%s" % (blockNum,irreversibleBlockNum,transId,promotedBlockNum,ibnSchedActive))
-    blockProducer=getBlockProducer(node, blockNum)
-    blockNum+=1
-    Utils.Print("Advance until the next block producer is retrieved")
-    while blockProducer == getBlockProducer(node, blockNum):
-        blockNum+=1
-
-    blockProducer=getBlockProducer(node, blockNum)
-    return blockNum
-
 def setActiveProducers(prodsActive, activeProducers):
     for prod in prodsActive:
         prodsActive[prod]=prod in activeProducers
 
 def verifyProductionRounds(trans, node, prodsActive, rounds):
-    blockNum=getNextCleanProductionCycle(trans, node)
+    blockNum=node.getNextCleanProductionCycle(trans)
     Utils.Print("Validating blockNum=%s" % (blockNum))
 
     temp=Utils.Debug
     Utils.Debug=False
     Utils.Print("FIND VALID BLOCK PRODUCER")
-    blockProducer=getBlockProducer(node, blockNum)
+    blockProducer=node.getBlockProducerByNum(blockNum)
     lastBlockProducer=blockProducer
     adjust=False
     while not isValidBlockProducer(prodsActive, blockNum, node):
         adjust=True
-        blockProducer=getBlockProducer(node, blockNum)
+        blockProducer=node.getBlockProducerByNum(blockNum)
         if lastBlockProducer!=blockProducer:
             Utils.Print("blockProducer=%s for blockNum=%s is for node=%s" % (blockProducer, blockNum, ProducerToNode.map[blockProducer]))
         lastBlockProducer=blockProducer
@@ -132,7 +88,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds):
             Utils.Print("saw=%s, blockProducer=%s, blockNum=%s" % (saw,blockProducer,blockNum))
         lastBlockProducer=blockProducer
         saw=1
-        blockProducer=getBlockProducer(node, blockNum)
+        blockProducer=node.getBlockProducerByNum(blockNum)
         blockNum+=1
 
     if adjust:
@@ -147,34 +103,34 @@ def verifyProductionRounds(trans, node, prodsActive, rounds):
         lastBlockProducer=None
         for j in range(0, 21):
             # each new set of 12 blocks should have a different blockProducer
-            if lastBlockProducer is not None and lastBlockProducer==getBlockProducer(node, blockNum):
+            if lastBlockProducer is not None and lastBlockProducer==node.getBlockProducerByNum(blockNum):
                 Utils.cmdError("expected blockNum %s to be produced by any of the valid producers except %s" % (blockNum, lastBlockProducer))
-                errorExit("Failed because of incorrect block producer order")
+                Utils.errorExit("Failed because of incorrect block producer order")
 
             # make sure that the next set of 12 blocks all have the same blockProducer
-            lastBlockProducer=getBlockProducer(node, blockNum)
+            lastBlockProducer=node.getBlockProducerByNum(blockNum)
             for k in range(0, 12):
                 validBlockProducer(prodsActive, prodsSeen, blockNum, node1)
-                blockProducer=getBlockProducer(node, blockNum)
+                blockProducer=node.getBlockProducerByNum(blockNum)
                 if lastBlockProducer!=blockProducer:
                     printStr=""
                     newBlockNum=blockNum-18
                     for l in range(0,36):
                         printStr+="%s" % (newBlockNum)
                         printStr+=":"
-                        newBlockProducer=getBlockProducer(node, newBlockNum)
+                        newBlockProducer=node.getBlockProducerByNum(newBlockNum)
                         printStr+="%s" % (newBlockProducer)
                         printStr+=" "
                         newBlockNum+=1
                     Utils.cmdError("expected blockNum %s (started from %s) to be produced by %s, but produded by %s: round=%s, prod slot=%s, prod num=%s - %s" % (blockNum, startingFrom, lastBlockProducer, blockProducer, i, j, k, printStr))
-                    errorExit("Failed because of incorrect block producer order")
+                    Utils.errorExit("Failed because of incorrect block producer order")
                 blockNum+=1
 
     # make sure that we have seen all 21 producers
     prodsSeenKeys=prodsSeen.keys()
     if len(prodsSeenKeys)!=21:
         Utils.cmdError("only saw %s producers of expected 21. At blockNum %s only the following producers were seen: %s" % (len(prodsSeenKeys), blockNum, ",".join(prodsSeenKeys)))
-        errorExit("Failed because of missing block producers")
+        Utils.errorExit("Failed because of missing block producers")
 
     Utils.Debug=temp
 
@@ -209,16 +165,16 @@ def verifyProductionRounds(trans, node, prodsActive, rounds):
     cluster.killall(allInstances=killAll)
     cluster.cleanup()
     Print("Stand up cluster")
-    if cluster.launch(prodCount=prodCount, onlyBios=False, dontKill=dontKill, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21, p2pPlugin=p2pPlugin) is False:
+    if cluster.launch(prodCount=prodCount, onlyBios=False, dontKill=dontKill, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21, p2pPlugin=p2pPlugin, useBiosBootFile=False) is False:
         Utils.cmdError("launcher")
-        errorExit("Failed to stand up eos cluster.")
+        Utils.errorExit("Failed to stand up eos cluster.")
 
     Print("Validating system accounts after bootstrap")
     cluster.validateAccounts(None)
 
     accounts=cluster.createAccountKeys(5)
     if accounts is None:
-        errorExit("FAILURE - create keys")
+        Utils.errorExit("FAILURE - create keys")
     accounts[0].name="tester111111"
     accounts[1].name="tester222222"
     accounts[2].name="tester333333"
@@ -232,7 +188,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds):
     walletMgr.cleanup()
     if walletMgr.launch() is False:
         Utils.cmdError("%s" % (WalletdName))
-        errorExit("Failed to stand up eos walletd.")
+        Utils.errorExit("Failed to stand up eos walletd.")
 
     testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4]])
 
@@ -273,7 +229,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds):
     #first account will vote for node0 producers, all others will vote for node1 producers
     node=node0
     for account in accounts:
-        trans=vote(node, account, node.producers)
+        trans=node.vote(account, node.producers)
         node=node1
 
     setActiveProducers(prodsActive, node1.producers)
@@ -284,7 +240,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds):
     # first account will vote for node2 producers, all others will vote for node3 producers
     node=node1
     for account in accounts:
-        trans=vote(node, account, node.producers)
+        trans=node.vote(account, node.producers)
         node=node2
 
     setActiveProducers(prodsActive, node2.producers)
diff --git a/tests/p2p_network_test.py b/tests/p2p_network_test.py
index bdd7ed16f00..49bd746e4e9 100755
--- a/tests/p2p_network_test.py
+++ b/tests/p2p_network_test.py
@@ -23,6 +23,7 @@
 parser = argparse.ArgumentParser(add_help=False)
 Print=testUtils.Utils.Print
+cmdError=Utils.cmdError
 errorExit=Utils.errorExit
 
 # Override default help argument so that only --help (and not -h) can call help
diff --git a/tests/testUtils.py b/tests/testUtils.py
index ad77cd20c4c..d2a69231513 100755
--- a/tests/testUtils.py
+++ b/tests/testUtils.py
@@ -1,11 +1,13 @@
 import subprocess
 import time
 import os
+from collections import deque
 from collections import namedtuple
 import inspect
 import json
 import shlex
 from sys import stdout
+from sys import exit
 import traceback
 
 ###########################################################################################
@@ -24,6 +26,7 @@ class Utils:
     EosLauncherPath="programs/eosio-launcher/eosio-launcher"
     MongoPath="mongo"
     ShuttingDown=False
+    CheckOutputDeque=deque(maxlen=10)
 
     @staticmethod
     def Print(*args, **kwargs):
@@ -76,6 +79,7 @@ def checkOutput(cmd):
         assert(isinstance(cmd, list))
         popen=subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         (output,error)=popen.communicate()
+        Utils.CheckOutputDeque.append((output,error,cmd))
         if popen.returncode != 0:
             raise subprocess.CalledProcessError(returncode=popen.returncode, cmd=cmd, output=error)
         return output.decode("utf-8")
@@ -185,3 +189,17 @@ def __str__(self):
         return "Name: %s" % (self.name)
 
 ###########################################################################################
+
+def addEnum(enumClassType, type):
+    setattr(enumClassType, type, enumClassType(type))
+
+def unhandledEnumType(type):
+    raise RuntimeError("No case defined for type=%s" % (type.type))
+
+class EnumType:
+
+    def __init__(self, type):
+        self.type=type
+
+    def __str__(self):
+        return self.type
diff --git a/tools/eosiocpp.in b/tools/eosiocpp.in
index 3e8f84e96d7..dd56f43dd1d 100755
--- a/tools/eosiocpp.in
+++ b/tools/eosiocpp.in
@@ -150,6 +150,11 @@ function print_help {
    echo "  Generate the ABI specification file [EXPERIMENTAL]"
 }
 
+function print_deprecation_notice {
+   echo -e "\033[0;33mWARNING: this tool is deprecated and will be removed in a future release\033[0m" 1>&2
+   echo -e "\033[0;33mPlease consider using the EOSIO.CDT (https://github.com/EOSIO/eosio.cdt/)\033[0m" 1>&2
+}
+
 command=""
 
 while [[ $# -gt 1 ]]
@@ -185,6 +190,8 @@ case $key in
 esac
 done
 
+print_deprecation_notice
+
 if [[ "outname" == "$command" ]]; then
    build_contract $@
 elif [[ "newcontract" == "$command" ]]; then
diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt
index 7442b00a69e..328b66a4462 100644
--- a/unittests/CMakeLists.txt
+++ b/unittests/CMakeLists.txt
@@ -28,7 +28,7 @@ target_include_directories( unit_test PUBLIC
                             ${CMAKE_CURRENT_SOURCE_DIR}/contracts ${CMAKE_CURRENT_BINARY_DIR}/contracts ${CMAKE_CURRENT_BINARY_DIR}/include )
 
-add_dependencies(unit_test asserter test_api test_api_mem test_api_db test_ram_limit test_api_multi_index exchange eosio.token proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop dice eosio.msig payloadless tic_tac_toe deferred_test)
+add_dependencies(unit_test asserter test_api test_api_mem test_api_db test_ram_limit test_api_multi_index eosio.token proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop dice eosio.msig payloadless tic_tac_toe deferred_test)
 
 #Manually run unit_test for all supported runtimes
 #To run unit_test with all log from blockchain displayed, put --verbose after --, i.e. unit_test -- --verbose
@@ -38,6 +38,9 @@ add_test(NAME unit_test_binaryen COMMAND unit_test
 add_test(NAME unit_test_wavm COMMAND unit_test
  -t \!wasm_tests/weighted_cpu_limit_tests
  --report_level=detailed --color_output --catch_system_errors=no -- --wavm)
+add_test(NAME unit_test_wabt COMMAND unit_test
+ -t \!wasm_tests/weighted_cpu_limit_tests
+ --report_level=detailed --color_output -- --wabt)
 
 if(ENABLE_COVERAGE_TESTING)
diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp
index 3c6b4d55a01..44677b261c7 100644
--- a/unittests/abi_tests.cpp
+++ b/unittests/abi_tests.cpp
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include
 
 #include
@@ -50,6 +51,22 @@ fc::variant verify_byte_round_trip_conversion( const abi_serializer& abis, const
    return var2;
 }
 
+void verify_round_trip_conversion( const abi_serializer& abis, const type_name& type, const std::string& json, const std::string& hex, const std::string& expected_json )
+{
+   auto var = fc::json::from_string(json);
+   auto bytes = abis.variant_to_binary(type, var, max_serialization_time);
+   BOOST_REQUIRE_EQUAL(fc::to_hex(bytes), hex);
+   auto var2 = abis.binary_to_variant(type, bytes, max_serialization_time);
+   BOOST_REQUIRE_EQUAL(fc::json::to_string(var2), expected_json);
+   auto bytes2 = abis.variant_to_binary(type, var2, max_serialization_time);
+   BOOST_REQUIRE_EQUAL(fc::to_hex(bytes2), hex);
+}
+
+void verify_round_trip_conversion( const abi_serializer& abis, const type_name& type, const std::string& json, const std::string& hex )
+{
+   verify_round_trip_conversion( abis, type, json, hex, json );
+}
+
 auto get_resolver(const abi_def& abi = abi_def())
 {
    return [&abi](const account_name &name) -> optional {
@@ -83,7 +100,7 @@ fc::variant verify_type_round_trip_conversion( const abi_serializer& abis, const
 const char* my_abi = R"=====(
 {
-  "version": "",
+  "version": "eosio::abi/1.0",
   "types": [{
      "new_type_name": "type_name",
      "type": "string"
@@ -478,6 +495,7 @@ BOOST_AUTO_TEST_CASE(uint_types)
 const char* currency_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [],
   "structs": [{
       "name": "transfer",
@@ -539,10 +557,11 @@ struct abi_gen_helper {
     std::string stdc_include_param = std::string("-I") + eosiolib_path + "/musl/upstream/include";
 
     abi_def output;
+    output.version = "eosio::abi/1.0";
 
     std::string contract;
     std::vector actions;
-
+
     auto extra_args = std::vector{"-fparse-all-comments", "--std=c++14", "--target=wasm32", "-ffreestanding", "-nostdlib",
       "-nostdlibinc", "-fno-threadsafe-statics", "-fno-rtti", "-fno-exceptions",
       include_param, boost_include_param, stdcpp_include_param,
@@ -551,7 +570,7 @@
     bool res = runToolOnCodeWithArgs(
       new find_eosio_abi_macro_action(contract, actions, ""),
       source,
-      extra_args
+      extra_args
     );
     FC_ASSERT(res == true);
@@ -645,6 +664,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_all_types, abi_gen_helper)
 const char* all_types_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [],
   "structs": [{
      "name": "test_struct",
@@ -792,6 +812,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_double_action, abi_gen_helper)
 const char* double_action_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [],
   "structs": [{
      "name" : "A",
@@ -1010,6 +1031,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_full_table_decl, abi_gen_helper)
 const char* full_table_decl_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [],
   "structs": [{
      "name" : "table1",
@@ -1108,6 +1130,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_template_base, abi_gen_helper)
 const char* template_base_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [],
   "structs": [{
      "name" : "base32",
@@ -1163,6 +1186,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_action_and_table, abi_gen_helper)
 const char* action_and_table_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [],
   "structs": [{
      "name" : "table_action",
@@ -1222,6 +1246,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_simple_typedef, abi_gen_helper)
 const char* simple_typedef_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [{
      "new_type_name" : "my_base_alias",
      "type" : "common_params"
@@ -1288,6 +1313,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_field_typedef, abi_gen_helper)
 const char* field_typedef_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [{
      "new_type_name" : "my_complex_field_alias",
      "type" : "complex_field"
@@ -1363,6 +1389,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_vector_of_POD, abi_gen_helper)
 const char* abigen_vector_of_POD_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [],
   "structs": [{
      "name": "table1",
@@ -1436,6 +1463,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_vector_of_structs, abi_gen_helper)
 const char* abigen_vector_of_structs_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [],
   "structs": [{
      "name": "my_struct",
@@ -1541,6 +1569,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_vector_alias, abi_gen_helper)
 const char* abigen_vector_alias_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [{
      "new_type_name": "array_of_rows",
      "type": "row[]"
@@ -1617,6 +1646,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_eosioabi_macro, abi_gen_helper)
 const char* abigen_eosioabi_macro_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [],
   "structs": [{
      "name": "hi",
@@ -1679,6 +1709,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_contract_inheritance, abi_gen_helper)
 const char* abigen_contract_inheritance_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [],
   "structs": [{
      "name": "hi",
@@ -1976,7 +2007,7 @@ BOOST_AUTO_TEST_CASE(general)
     {"name":"table2","index_type":"indextype2","key_names":["keyname2"],"key_types":["typename2"],"type":"type2"}
   ],
   "abidef":{
-    "version": "",
+    "version": "eosio::abi/1.0",
     "types" : [{"new_type_name":"new", "type":"old"}],
     "structs" : [{"name":"struct1", "base":"base1", "fields": [{"name":"name1", "type": "type1"}, {"name":"name2", "type": "type2"}] }],
     "actions" : [{"name":"action1","type":"type1", "ricardian_contract":""}],
@@ -1985,7 +2016,7 @@
     "abi_extensions": []
   },
   "abidef_arr": [{
-    "version": "",
+    "version": "eosio::abi/1.0",
     "types" : [{"new_type_name":"new", "type":"old"}],
     "structs" : [{"name":"struct1", "base":"base1", "fields": [{"name":"name1", "type": "type1"}, {"name":"name2", "type": "type2"}] }],
     "actions" : [{"name":"action1","type":"type1", "ricardian_contract":""}],
@@ -1993,7 +2024,7 @@
     "ricardian_clauses": [],
     "abi_extensions": []
   },{
-    "version": "",
+    "version": "eosio::abi/1.0",
     "types" : [{"new_type_name":"new", "type":"old"}],
     "structs" : [{"name":"struct1", "base":"base1", "fields": [{"name":"name1", "type": "type1"}, {"name":"name2", "type": "type2"}] }],
     "actions" : [{"name":"action1","type":"type1", "ricardian_contract": ""}],
@@ -2030,6 +2061,7 @@ BOOST_AUTO_TEST_CASE(abi_cycle)
 const char* struct_cycle_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [],
   "structs": [{
     "name": "A",
@@ -2374,7 +2406,7 @@ BOOST_AUTO_TEST_CASE(setabi_test)
 const char* abi_def_abi = R"=====(
 {
-  "version": "",
+  "version": "eosio::abi/1.0",
   "types": [{
      "new_type_name": "type_name",
      "type": "string"
@@ -2506,7 +2538,7 @@ BOOST_AUTO_TEST_CASE(setabi_test)
 const char* abi_string = R"=====(
 {
-  "version": "",
+  "version": "eosio::abi/1.0",
   "types": [{
      "new_type_name": "account_name",
      "type": "name"
@@ -2766,6 +2798,7 @@ BOOST_AUTO_TEST_CASE(packed_transaction)
 const char* packed_transaction_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [{
      "new_type_name": "compression_type",
      "type": "int64"
@@ -2848,6 +2881,7 @@ BOOST_AUTO_TEST_CASE(abi_type_repeat)
 const char* repeat_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [{
      "new_type_name": "actor_name",
      "type": "name"
@@ -2908,6 +2942,7 @@ BOOST_AUTO_TEST_CASE(abi_struct_repeat)
 const char* repeat_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [{
      "new_type_name": "actor_name",
      "type": "name"
@@ -2964,6 +2999,7 @@ BOOST_AUTO_TEST_CASE(abi_action_repeat)
 const char* repeat_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [{
      "new_type_name": "actor_name",
      "type": "name"
@@ -3023,6 +3059,7 @@ BOOST_AUTO_TEST_CASE(abi_table_repeat)
 const char* repeat_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [{
      "new_type_name": "actor_name",
      "type": "name"
@@ -3085,6 +3122,7 @@ BOOST_AUTO_TEST_CASE(abi_type_def)
 // inifinite loop in types
 const char* repeat_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [{
      "new_type_name": "account_name",
      "type": "name"
@@ -3137,6 +3175,7 @@ BOOST_AUTO_TEST_CASE(abi_type_loop)
 // inifinite loop in types
 const char* repeat_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [{
      "new_type_name": "account_name",
      "type": "name"
@@ -3180,6 +3219,7 @@ BOOST_AUTO_TEST_CASE(abi_type_redefine)
 // inifinite loop in types
 const char* repeat_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [{
      "new_type_name": "account_name",
      "type": "account_name"
@@ -3220,6 +3260,7 @@ BOOST_AUTO_TEST_CASE(abi_type_redefine_to_name)
 // inifinite loop in types
 const char* repeat_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [{
      "new_type_name": "name",
      "type": "name"
@@ -3241,6 +3282,7 @@ BOOST_AUTO_TEST_CASE(abi_type_nested_in_vector)
 // inifinite loop in types
 const char* repeat_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [],
   "structs": [{
      "name": "store_t",
@@ -3266,6 +3308,7 @@ BOOST_AUTO_TEST_CASE(abi_account_name_in_eosio_abi)
 // inifinite loop in types
 const char* repeat_abi = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [{
      "new_type_name": "account_name",
      "type": "name"
@@ -3308,6 +3351,7 @@ BOOST_AUTO_TEST_CASE(abi_large_array) try {
 const char* abi_str = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [],
   "structs": [{
      "name": "hi",
@@ -3345,6 +3389,7 @@ BOOST_AUTO_TEST_CASE(abi_is_type_recursion) try {
 const char* abi_str = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [
     {
     "new_type_name": "a[]",
@@ -3388,6 +3433,7 @@ BOOST_AUTO_TEST_CASE(abi_recursive_structs) try {
 const char* abi_str = R"=====(
 {
+  "version": "eosio::abi/1.0",
   "types": [],
   "structs": [
     {
@@ -3422,7 +3468,16 @@ BOOST_AUTO_TEST_CASE(abi_recursive_structs)
          "type": "a"
        }
      ]
-    }
+    },
+    {
+      "name": "hi2",
+      "base": "",
+      "fields": [{
+           "name": "user",
+           "type": "name"
+        }
+      ]
+    }
   ],
   "actions": [{
      "name": "hi",
@@ -3433,10 +3488,10 @@
     "tables": []
 }
 )=====";
-
+
   abi_serializer abis(fc::json::from_string(abi_str).as(), max_serialization_time);
-  string hi_data = "{\"user\":\"eosio\",\"arg2\":{\"user\":\"1\"}}";
-  auto bin = abis.variant_to_binary("hi", fc::json::from_string(hi_data), max_serialization_time);
+  string hi_data = "{\"user\":\"eosio\"}";
+  auto bin = abis.variant_to_binary("hi2", fc::json::from_string(hi_data), max_serialization_time);
   BOOST_CHECK_THROW( abis.binary_to_variant("hi", bin, max_serialization_time);, fc::exception );
 
 } FC_LOG_AND_RETHROW()
@@ -3471,4 +3526,209 @@ BOOST_AUTO_TEST_CASE(abi_deep_structs_validate)
    } FC_LOG_AND_RETHROW()
 }
 
+BOOST_AUTO_TEST_CASE(variants)
+{
+   auto duplicate_variant_abi = R"({
+      "version": "eosio::abi/1.1",
+      "variants": [
+         {"name": "v1", "types": ["int8", "string", "bool"]},
+         {"name": "v1", "types": ["int8", "string", "bool"]},
+      ],
+   })";
+
+   auto variant_abi_invalid_type = R"({
+      "version": "eosio::abi/1.1",
+      "variants": [
+         {"name": "v1", "types": ["int91", "string", "bool"]},
+      ],
+   })";
+
+   auto variant_abi = R"({
+      "version": "eosio::abi/1.1",
+      "types": [
+         {"new_type_name": "foo", "type": "s"},
+         {"new_type_name": "bar", "type": "s"},
+      ],
+      "structs": [
+         {"name": "s", "base": "", "fields": [
+            {"name": "i0", "type": "int8"},
+            {"name": "i1", "type": "int8"},
+         ]}
+      ],
+      "variants": [
+         {"name": "v1", "types": ["int8", "string", "int16"]},
+         {"name": "v2", "types": ["foo", "bar"]},
+      ],
+   })";
+
+   try {
+      // round-trip abi through multiple formats
+      // json -> variant -> abi_def -> bin
+      auto bin = fc::raw::pack(fc::json::from_string(variant_abi).as());
+      // bin -> abi_def -> variant -> abi_def
+      abi_serializer abis(variant(fc::raw::unpack(bin)).as(), max_serialization_time );
+
+      // duplicate variant definition detected
+      BOOST_CHECK_THROW( abi_serializer( fc::json::from_string(duplicate_variant_abi).as(), max_serialization_time ), duplicate_abi_variant_def_exception );
+
+      // invalid_type_inside_abi
+      BOOST_CHECK_THROW( abi_serializer( fc::json::from_string(variant_abi_invalid_type).as(), max_serialization_time ), invalid_type_inside_abi );
+
+      // expected array containing variant
+      BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"(9)"), max_serialization_time), abi_exception );
+      BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"([4])"), max_serialization_time), abi_exception );
+      BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"([4, 5])"), max_serialization_time), abi_exception );
+      BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"(["4", 5, 6])"), max_serialization_time), abi_exception );
+
+      // type is not valid within this variant
+      BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"(["int9", 21])"), max_serialization_time), abi_exception );
+
+      verify_round_trip_conversion(abis, "v1", R"(["int8",21])", "0015");
+      verify_round_trip_conversion(abis, "v1", R"(["string","abcd"])", "010461626364");
+      verify_round_trip_conversion(abis, "v1", R"(["int16",3])", "020300");
+      verify_round_trip_conversion(abis, "v1", R"(["int16",4])", "020400");
+      verify_round_trip_conversion(abis, "v2", R"(["foo",{"i0":5,"i1":6}])", "000506");
+      verify_round_trip_conversion(abis, "v2", R"(["bar",{"i0":5,"i1":6}])", "010506");
+   } FC_LOG_AND_RETHROW()
+}
+
+BOOST_AUTO_TEST_CASE(extend)
+{
+   auto abi = R"({
+      "version": "eosio::abi/1.1",
+      "structs": [
+         {"name": "s", "base": "", "fields": [
+            {"name": "i0", "type": "int8"},
+            {"name": "i1", "type": "int8"},
+            {"name": "i2", "type": "int8$"},
+            {"name": "a", "type": "int8[]$"},
+            {"name": "o", "type": "int8?$"},
+         ]}
+      ],
+   })";
+
+   try {
+      abi_serializer abis(fc::json::from_string(abi).as(), max_serialization_time );
+
+      // missing i1
+      BOOST_CHECK_THROW( abis.variant_to_binary("s", fc::json::from_string(R"({"i0":5})"), max_serialization_time), abi_exception );
+
+      // Unexpected 'a'
+      BOOST_CHECK_THROW( abis.variant_to_binary("s", fc::json::from_string(R"({"i0":5,"i1":6,"a":[8,9,10]})"), max_serialization_time), pack_exception );
+
+      verify_round_trip_conversion(abis, "s", R"({"i0":5,"i1":6})", "0506");
+      verify_round_trip_conversion(abis, "s", R"({"i0":5,"i1":6,"i2":7})", "050607");
+      verify_round_trip_conversion(abis, "s", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10]})", "0506070308090a");
+      verify_round_trip_conversion(abis, "s", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10],"o":null})", "0506070308090a00");
+      verify_round_trip_conversion(abis, "s", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10],"o":31})", "0506070308090a011f");
+
+      verify_round_trip_conversion(abis, "s", R"([5,6])", "0506", R"({"i0":5,"i1":6})");
+      verify_round_trip_conversion(abis, "s", R"([5,6,7])", "050607", R"({"i0":5,"i1":6,"i2":7})");
+      verify_round_trip_conversion(abis, "s", R"([5,6,7,[8,9,10]])", "0506070308090a", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10]})");
+      verify_round_trip_conversion(abis, "s", R"([5,6,7,[8,9,10],null])", "0506070308090a00", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10],"o":null})");
+      verify_round_trip_conversion(abis, "s", R"([5,6,7,[8,9,10],31])", "0506070308090a011f", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10],"o":31})");
+   } FC_LOG_AND_RETHROW()
+}
+
+BOOST_AUTO_TEST_CASE(version)
+{
+   try {
+      BOOST_CHECK_THROW( abi_serializer(fc::json::from_string(R"({})").as(), max_serialization_time), unsupported_abi_version_exception );
+      BOOST_CHECK_THROW( abi_serializer(fc::json::from_string(R"({"version": ""})").as(), max_serialization_time), unsupported_abi_version_exception );
+      BOOST_CHECK_THROW( abi_serializer(fc::json::from_string(R"({"version": "eosio::abi/9.0"})").as(), max_serialization_time), unsupported_abi_version_exception );
+      abi_serializer(fc::json::from_string(R"({"version": "eosio::abi/1.0"})").as(), max_serialization_time);
+      abi_serializer(fc::json::from_string(R"({"version": "eosio::abi/1.1"})").as(), max_serialization_time);
+   } FC_LOG_AND_RETHROW()
+}
+
+BOOST_AUTO_TEST_CASE(abi_serialize_incomplete_json_array)
+{
+   using eosio::testing::fc_exception_message_starts_with;
+
+   auto abi = R"({
+      "version": "eosio::abi/1.0",
+      "structs": [
+         {"name": "s", "base": "", "fields": [
+            {"name": "i0", "type": "int8"},
+            {"name": "i1", "type": "int8"},
+            {"name": "i2", "type": "int8"}
+         ]}
+      ],
+   })";
+
+   try {
+      abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time );
+
+      BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"([])"), max_serialization_time),
+                             pack_exception, fc_exception_message_starts_with("Early end to array specifying the fields of struct") );
+
+      BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"([1,2])"), max_serialization_time),
+                             pack_exception, fc_exception_message_starts_with("Early end to array specifying the fields of struct") );
+
+      verify_round_trip_conversion(abis, "s", R"([1,2,3])", "010203", R"({"i0":1,"i1":2,"i2":3})");
+
+   } FC_LOG_AND_RETHROW()
+}
+
+BOOST_AUTO_TEST_CASE(abi_serialize_incomplete_json_object)
+{
+   using eosio::testing::fc_exception_message_is;
+
+   auto abi = R"({
+      "version": "eosio::abi/1.0",
+      "structs": [
+         {"name": "s1", "base": "", "fields": [
+            {"name": "i0", "type": "int8"},
+            {"name": "i1", "type": "int8"}
+         ]},
+         {"name": "s2", "base": "", "fields": [
+            {"name": "f0", "type": "s1"},
+            {"name": "i2", "type": "int8"}
+         ]}
+      ],
+   })";
+
+   try {
+      abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time );
+
+      BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({})"), max_serialization_time),
+                             pack_exception, fc_exception_message_is("Missing 'f0' in variant object") );
+
+      BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({"f0":{"i0":1}})"), max_serialization_time),
+                             pack_exception, fc_exception_message_is("Missing 'i1' in variant object") );
+
+      verify_round_trip_conversion(abis, "s2", R"({"f0":{"i0":1,"i1":2},"i2":3})", "010203");
+
+   } FC_LOG_AND_RETHROW()
+}
+
+BOOST_AUTO_TEST_CASE(abi_serialize_json_mismatching_type)
+{
+   using eosio::testing::fc_exception_message_is;
+
+   auto abi = R"({
+      "version": "eosio::abi/1.0",
+      "structs": [
+         {"name": "s1", "base": "", "fields": [
+            {"name": "i0", "type": "int8"},
+         ]},
+         {"name": "s2", "base": "", "fields": [
+            {"name": "f0", "type": "s1"},
+            {"name": "i1", "type": "int8"}
+         ]}
+      ],
+   })";
+
+   try {
+      abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time );
+
+      BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({"f0":1,"i1":2})"), max_serialization_time),
+                             pack_exception, fc_exception_message_is("Failed to serialize struct 's1' in variant object") );
+
+      verify_round_trip_conversion(abis, "s2", R"({"f0":{"i0":1},"i1":2})", "0102");
+
+   } FC_LOG_AND_RETHROW()
+}
+
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp
index 8b2be0ad065..c9811adf071 100644
--- a/unittests/api_tests.cpp
+++ b/unittests/api_tests.cpp
@@ -437,6 +437,30 @@ BOOST_FIXTURE_TEST_CASE(action_tests, TESTER) { try {
    BOOST_REQUIRE_EQUAL( validate(), true );
 } FC_LOG_AND_RETHROW() }
 
+// test require_recipient loop (doesn't cause infinite loop)
+BOOST_FIXTURE_TEST_CASE(require_notice_tests, TESTER) { try {
+      produce_blocks(2);
+      create_account( N(testapi) );
+      create_account( N(acc5) );
+      produce_blocks(1);
+      set_code( N(testapi), test_api_wast );
+      set_code( N(acc5), test_api_wast );
+      produce_blocks(1);
+
+      // test require_notice
+      signed_transaction trx;
+      auto tm = test_api_action{};
+
+      action act( std::vector{{N( testapi ), config::active_name}}, tm );
+      trx.actions.push_back( act );
+
+      set_transaction_headers( trx );
+      trx.sign( get_private_key( N( testapi ), "active" ), control->get_chain_id() );
+      auto res = push_transaction( trx );
+      BOOST_CHECK_EQUAL( res->receipt->status, transaction_receipt::executed );
+
+} FC_LOG_AND_RETHROW() }
+
 BOOST_FIXTURE_TEST_CASE(ram_billing_in_notify_tests, TESTER) { try {
    produce_blocks(2);
    create_account( N(testapi) );
@@ -989,15 +1013,16 @@ BOOST_FIXTURE_TEST_CASE(transaction_tests, TESTER) { try {
    );
 
    {
-      produce_blocks(10);
-      transaction_trace_ptr trace;
-      auto c = control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t && t->receipt->status != transaction_receipt::executed) { trace = t; } } );
+      produce_blocks(10);
+      transaction_trace_ptr trace;
+      auto c = control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t && t->receipt && t->receipt->status != transaction_receipt::executed) { trace = t; } } );
 
-      // test error handling on deferred transaction failure
-      CALL_TEST_FUNCTION(*this, "test_transaction", "send_transaction_trigger_error_handler", {});
+      // test error handling on deferred transaction failure
+      CALL_TEST_FUNCTION(*this, "test_transaction", "send_transaction_trigger_error_handler", {});
 
-      BOOST_CHECK(trace);
-      BOOST_CHECK_EQUAL(trace->receipt->status, transaction_receipt::soft_fail);
+      BOOST_REQUIRE(trace);
+      BOOST_CHECK_EQUAL(trace->receipt->status, transaction_receipt::soft_fail);
+      c.disconnect();
    }
 
    // test test_transaction_size
@@ -1017,7 +1042,7 @@ BOOST_FIXTURE_TEST_CASE(transaction_tests, TESTER) { try {
    // test send_action_recurse
    BOOST_CHECK_EXCEPTION(CALL_TEST_FUNCTION(*this, "test_transaction", "send_action_recurse", {}), eosio::chain::transaction_exception,
                          [](const eosio::chain::transaction_exception& e) {
-                            return expect_assert_message(e, "inline action recursion depth reached");
+                            return expect_assert_message(e, "max inline action depth per transaction reached");
                          }
    );
diff --git a/unittests/block_tests.cpp b/unittests/block_tests.cpp
index f196dbdae93..6c4129510c3 100644
--- a/unittests/block_tests.cpp
+++ b/unittests/block_tests.cpp
@@ -47,7 +47,119 @@ BOOST_AUTO_TEST_CASE(block_with_invalid_tx_test)
    [] (const fc::exception &e)->bool {
       return e.code() == account_name_exists_exception::code_value ;
    }) ;
-
+
+}
+
+std::pair corrupt_trx_in_block(validating_tester& main, account_name act_name) {
+   // First we create a valid block with valid transaction
+   main.create_account(act_name);
+   signed_block_ptr b = main.produce_block_no_validation();
+
+   // Make a copy of the valid block and corrupt the transaction
+   auto copy_b = std::make_shared(*b);
+   auto signed_tx = copy_b->transactions.back().trx.get().get_signed_transaction();
+   // Corrupt one signature
+   signed_tx.signatures.clear();
+   signed_tx.sign(main.get_private_key(act_name, "active"), main.control->get_chain_id());
+
+   // Replace the valid transaction with the invalid transaction
+   auto invalid_packed_tx = packed_transaction(signed_tx);
+   copy_b->transactions.back().trx = invalid_packed_tx;
+
+   // Re-calculate the transaction merkle
+   vector trx_digests;
+   const auto& trxs = copy_b->transactions;
+   trx_digests.reserve( trxs.size() );
+   for( const auto& a : trxs )
+      trx_digests.emplace_back( a.digest() );
+   copy_b->transaction_mroot = merkle( move(trx_digests) );
+
+   // Re-sign the block
+   auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), main.control->head_block_state()->blockroot_merkle.get_root() ) );
+   auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, main.control->head_block_state()->pending_schedule_hash) );
+   copy_b->producer_signature = main.get_private_key(b->producer, "active").sign(sig_digest);
+   return std::pair(b, copy_b);
+}
+
+// verify that a block with a transaction with an incorrect signature, is blindly accepted from a trusted producer
+BOOST_AUTO_TEST_CASE(trusted_producer_test)
+{
+   flat_set trusted_producers = { N(defproducera), N(defproducerc) };
+   validating_tester main(trusted_producers);
+   // only using validating_tester to keep the 2 chains in sync, not to validate that the validating_node matches the main node,
+   // since it won't be
+   main.skip_validate = true;
+
+   // First we create a valid block with valid transaction
+   std::set producers = { N(defproducera), N(defproducerb), N(defproducerc), N(defproducerd) };
+   for (auto prod : producers)
+       main.create_account(prod);
+   auto b = main.produce_block();
+
+   std::vector schedule(producers.cbegin(), producers.cend());
+   auto trace = main.set_producers(schedule);
+
+   while (b->producer != N(defproducera)) {
+      b = main.produce_block();
+   }
+
+   auto blocks = corrupt_trx_in_block(main, N(tstproducera));
+   main.validate_push_block( blocks.second );
+}
+
+// like trusted_producer_test, except verify that any entry in the trusted_producer list is accepted
+BOOST_AUTO_TEST_CASE(trusted_producer_verify_2nd_test)
+{
+   flat_set trusted_producers = { N(defproducera), N(defproducerc) };
+   validating_tester main(trusted_producers);
+   // only using validating_tester to keep the 2 chains in sync, not to validate that the validating_node matches the main node,
+   // since it won't be
+   main.skip_validate = true;
+
+   // First we create a valid block with valid transaction
+   std::set producers = { N(defproducera), N(defproducerb), N(defproducerc), N(defproducerd) };
+   for (auto prod : producers)
+       main.create_account(prod);
+   auto b = main.produce_block();
+
+   std::vector schedule(producers.cbegin(), producers.cend());
+   auto trace = main.set_producers(schedule);
+
+   while (b->producer != N(defproducerc)) {
+      b = main.produce_block();
+   }
+
+   auto blocks = corrupt_trx_in_block(main, N(tstproducera));
+   main.validate_push_block( blocks.second );
+}
+
+// verify that a block with a transaction with an incorrect signature, is rejected if it is not from a trusted producer
+BOOST_AUTO_TEST_CASE(untrusted_producer_test)
+{
+   flat_set trusted_producers = { N(defproducera), N(defproducerc) };
+   validating_tester main(trusted_producers);
+   // only using validating_tester to keep the 2 chains in sync, not to validate that the validating_node matches the main node,
+   // since it won't be
+   main.skip_validate = true;
+
+   // First we create a valid block with valid transaction
+   std::set producers = { N(defproducera), N(defproducerb), N(defproducerc), N(defproducerd) };
+   for (auto prod : producers)
+       main.create_account(prod);
+   auto b = main.produce_block();
+
+   std::vector schedule(producers.cbegin(), producers.cend());
+   auto trace = main.set_producers(schedule);
+
+   while (b->producer != N(defproducerb)) {
+      b = main.produce_block();
+   }
+
+   auto blocks = corrupt_trx_in_block(main, N(tstproducera));
+   BOOST_REQUIRE_EXCEPTION(main.validate_push_block( blocks.second ), fc::exception ,
+      [] (const fc::exception &e)->bool {
+         return e.code() == unsatisfied_authorization::code_value ;
+      }) ;
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/unittests/exchange_tests.cpp b/unittests/exchange_tests.cpp
deleted file mode 100644
index 28e8ac563ba..00000000000
--- a/unittests/exchange_tests.cpp
+++ /dev/null
@@ -1,358 +0,0 @@
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-
-#include
-#include
-
-#include
-
-#include
-#include
-
-#ifdef NON_VALIDATING_TEST
-#define TESTER tester
-#else
-#define TESTER validating_tester
-#endif
-
-using namespace eosio;
-using namespace eosio::chain;
-using namespace eosio::testing;
-using namespace fc;
-
-#define A(X) asset::from_string( #X )
-
-struct margin_state {
-   extended_asset total_lendable;
-   extended_asset total_lent;
-   double         least_collateralized = 0;
-   double         interest_shares = 0;
-};
-FC_REFLECT( margin_state, (total_lendable)(total_lent)(least_collateralized)(interest_shares) )
-
-struct exchange_state {
-   account_name   manager;
-   extended_asset supply;
-   uint32_t       fee = 0;
-
-   struct connector {
-      extended_asset balance;
-      uint32_t       weight = 500;
-      margin_state   peer_margin;
-   };
-
-   connector base;
-   connector quote;
-};
-
-FC_REFLECT( exchange_state::connector, (balance)(weight)(peer_margin) );
-FC_REFLECT( exchange_state, (manager)(supply)(fee)(base)(quote) );
-
-class exchange_tester : public TESTER {
-   public:
-      auto push_action(account_name contract,
-                       const account_name& signer,
-                       const action_name &name, const variant_object &data ) {
-         string action_type_name = abi_ser.get_action_type(name);
-
-         action act;
-         act.account = contract;
-         act.name = name;
-         act.authorization = vector{{signer, config::active_name}};
-         act.data = abi_ser.variant_to_binary(action_type_name, data, abi_serializer_max_time);
-
-         signed_transaction trx;
-         trx.actions.emplace_back(std::move(act));
-         set_transaction_headers(trx);
-         trx.sign(get_private_key(signer, "active"), control->get_chain_id());
-         return push_transaction(trx);
-      }
-
-      asset get_balance(const account_name& account) const {
-         return get_currency_balance(N(exchange), symbol(SY(4,CUR)), account);
-      }
-
-      exchange_state get_market_state( account_name exchange, symbol sym ) {
-
-         uint64_t s = sym.value() >> 8;
-         const auto& db  = control->db();
-         const auto* tbl = db.find(boost::make_tuple(exchange, s, N(markets)));
-         if (tbl) {
-            const auto *obj = db.find(boost::make_tuple(tbl->id, s));
-            if( obj ) {
-               fc::datastream ds(obj->value.data(), obj->value.size());
-               exchange_state result;
-               fc::raw::unpack( ds, result );
-               return result;
-            }
-         }
-         FC_ASSERT( false, "unknown market state" );
-      }
-
-      extended_asset get_exchange_balance( account_name exchange, account_name currency,
-                                           symbol sym, account_name owner ) {
-         const auto& db  = control->db();
-         const auto* tbl = db.find(boost::make_tuple(exchange, owner, N(exaccounts)));
-
-         if (tbl) {
-            const auto *obj = db.find(boost::make_tuple(tbl->id, owner));
-            if( obj ) {
-               fc::datastream ds(obj->value.data(), obj->value.size());
-               account_name own;
-               flat_map, int64_t> balances;
-
-               fc::raw::unpack( ds, own );
-               fc::raw::unpack( ds, balances);
-
-               // wdump((balances));
-               auto b = balances[ make_pair( sym, currency ) ];
-               return extended_asset( asset( b, sym ), currency );
-            }
-         }
-         return extended_asset();
-      }
-
-      double get_lent_shares( account_name exchange, symbol market, account_name owner, bool base )
-      {
-         const auto& db  = control->db();
-
-         auto scope = ((market.value() >> 8) << 4) + (base ? 1 : 2);
-
-         const auto* tbl = db.find(boost::make_tuple(exchange, scope, N(loans)));
-
-         if (tbl) {
-            const auto *obj = db.find(boost::make_tuple(tbl->id, owner));
-            if( obj ) {
-               fc::datastream ds(obj->value.data(), obj->value.size());
-               account_name own;
-               double interest_shares;
-
-               fc::raw::unpack( ds, own );
-               fc::raw::unpack( ds, interest_shares);
-
-               return interest_shares;
-            }
-         }
-         FC_ASSERT( false, "unable to find loan balance" );
-      }
-
-      void deploy_exchange( account_name ac ) {
-         create_account( ac );
-         set_code( ac, exchange_wast );
-      }
-
-      void create_currency( name contract, name signer, asset maxsupply ) {
-         push_action(contract, signer, N(create), mutable_variant_object()
-                 ("issuer",         contract )
-                 ("maximum_supply", maxsupply )
-                 ("can_freeze",     0)
-                 ("can_recall",     0)
-                 ("can_whitelist",  0)
-         );
-      }
-
-      void issue( name contract, name signer, name to, asset amount ) {
-         push_action( contract, signer, N(issue), mutable_variant_object()
-                 ("to",       to )
-                 ("quantity", amount )
-                 ("memo",     "")
-         );
-      }
-
-      auto trade( name ex_contract, name signer, symbol market,
-                  extended_asset sell, extended_asset min_receive )
-      {
-         wdump((market)(sell)(min_receive));
-         wdump((market.to_string()));
-         wdump((fc::variant(market).as_string()));
-         wdump((fc::variant(market).as()));
-         return push_action( ex_contract, signer, N(trade), mutable_variant_object()
-                 ("seller",       signer )
-                 ("market",       market )
-                 ("sell",         sell)
-                 ("min_receive",  min_receive)
-                 ("expire",       0)
-                 ("fill_or_kill", 1)
-         );
-      }
-
-      auto deposit( name exchangecontract, name signer, extended_asset amount ) {
-         return push_action( amount.contract, signer, N(transfer), mutable_variant_object()
-                 ("from",     signer )
-                 ("to",       exchangecontract )
-                 ("quantity", amount.quantity )
-                 ("memo",     "deposit")
-         );
-      }
-
-      auto lend( name contract, name signer, extended_asset quantity, symbol market ) {
-         return push_action( contract, signer, N(lend), mutable_variant_object()
-                 ("lender",   signer )
-                 ("market",   market )
-                 ("quantity", quantity )
-         );
-      }
-
-      auto unlend( name contract, name signer, double interest_shares, extended_symbol interest_symbol, symbol market ) {
-         return push_action( contract, signer, N(unlend), mutable_variant_object()
-                 ("lender",          signer )
-                 ("market",          market )
-                 ("interest_shares", interest_shares)
-                 ("interest_symbol", interest_symbol)
-         );
-      }
-
-      auto create_exchange( name contract, name signer,
-                            extended_asset base_deposit,
-                            extended_asset quote_deposit,
-                            asset exchange_supply ) {
-         return push_action( contract, signer, N(createx), mutable_variant_object()
-                 ("creator",        signer)
-                 ("initial_supply", exchange_supply)
-                 ("fee",            0)
-                 ("base_deposit",   base_deposit)
-                 ("quote_deposit",  quote_deposit)
-         );
-      }
-
-
-      exchange_tester()
-      :TESTER(),abi_ser(json::from_string(exchange_abi).as(), abi_serializer_max_time)
-      {
-         create_account( N(dan) );
-         create_account( N(trader) );
-
-         deploy_exchange( N(exchange) );
-
-         create_currency( N(exchange), N(exchange), A(1000000.00 USD) );
-         create_currency( N(exchange), N(exchange), A(1000000.00 BTC) );
-
-         issue( N(exchange), N(exchange), N(dan), A(1000.00 USD) );
-         issue( N(exchange), N(exchange), N(dan), A(1000.00 BTC) );
-
-         deposit( N(exchange), N(dan), extended_asset( A(500.00 USD), N(exchange) ) );
-         deposit( N(exchange), N(dan), extended_asset( A(500.00 BTC), N(exchange) ) );
-
-         create_exchange( N(exchange), N(dan),
-                          extended_asset( A(400.00 USD), N(exchange) ),
-                          extended_asset( A(400.00 BTC), N(exchange) ),
-                          A(10000000.00 EXC) );
-
-         produce_block();
-      }
-
-      abi_serializer abi_ser;
-};
-
-BOOST_AUTO_TEST_SUITE(exchange_tests)
-
-BOOST_AUTO_TEST_CASE( bootstrap ) try {
-   auto expected = asset::from_string( "1000000.0000 CUR" );
-   exchange_tester t;
-   t.create_currency( N(exchange), N(exchange), expected );
-   t.issue( N(exchange), N(exchange), N(exchange), expected );
-   auto actual = t.get_currency_balance(N(exchange), expected.get_symbol(), N(exchange));
-   BOOST_REQUIRE_EQUAL(expected, actual);
-} FC_LOG_AND_RETHROW() /// test_api_bootstrap
-
-
-BOOST_AUTO_TEST_CASE( exchange_create ) try {
-   auto expected = asset::from_string( "1000000.0000 CUR" );
-   exchange_tester t;
-
-   t.issue( N(exchange), N(exchange), N(trader), A(2000.00 BTC) );
-   t.issue( N(exchange), N(exchange), N(trader), A(2000.00 USD) );
-
-   t.deposit( N(exchange), N(trader), extended_asset( A(1500.00 USD), N(exchange) ) );
-   t.deposit( N(exchange), N(trader), extended_asset( A(1500.00 BTC), N(exchange) ) );
-
-   auto trader_ex_usd = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"USD"), N(trader) );
-   auto trader_ex_btc = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"BTC"), N(trader) );
-   auto dan_ex_usd    = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"USD"), N(dan) );
-   auto dan_ex_btc    = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"BTC"), N(dan) );
-
-   auto dan_ex_exc = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"EXC"), N(dan) );
-   wdump((dan_ex_exc));
-
-   auto result = t.trade( N(exchange), N(trader), symbol(2,"EXC"),
-                          extended_asset( A(10.00 BTC), N(exchange) ),
-                          extended_asset( A(0.01 USD), N(exchange) ) );
-
-   for( const auto& at : result->action_traces )
-      ilog( "${s}", ("s",at.console) );
-
-   trader_ex_usd = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"USD"), N(trader) );
-   trader_ex_btc = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"BTC"), N(trader) );
-   wdump((trader_ex_btc.quantity));
-   wdump((trader_ex_usd.quantity));
-
-   result = t.trade( N(exchange), N(trader), symbol(2,"EXC"),
-                     extended_asset( A(9.75 USD), N(exchange) ),
-                     extended_asset( A(0.01 BTC), N(exchange) ) );
-
-   trader_ex_usd = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"USD"), N(trader) );
-   trader_ex_btc = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"BTC"), N(trader) );
-
-   for( const auto& at : result->action_traces )
-      ilog( "${s}", ("s",at.console) );
-
-   wdump((trader_ex_btc.quantity));
-   wdump((trader_ex_usd.quantity));
-
-   BOOST_REQUIRE_EQUAL( trader_ex_usd.quantity, A(1500.00 USD) );
-   BOOST_REQUIRE_EQUAL( trader_ex_btc.quantity, A(1499.99 BTC) );
-
-   wdump((t.get_market_state( N(exchange), symbol(2,"EXC") ) ));
-
-   //BOOST_REQUIRE_EQUAL(expected, actual);
-} FC_LOG_AND_RETHROW() /// test_api_bootstrap
-
-
-BOOST_AUTO_TEST_CASE( exchange_lend ) try {
-   exchange_tester t;
-
-   t.create_account( N(lender) );
-   t.issue( N(exchange), N(exchange), N(lender), A(2000.00 BTC) );
-   t.issue( N(exchange), N(exchange), N(lender), A(2000.00 USD) );
-
-   t.deposit( N(exchange), N(lender), extended_asset( A(1500.00 USD), N(exchange) ) );
-   t.deposit( N(exchange), N(lender), extended_asset( A(1500.00 BTC), N(exchange) ) );
-
-   auto lender_ex_usd = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"USD"), N(lender) );
-   auto lender_ex_btc = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"BTC"), N(lender) );
-
-   t.lend( N(exchange), N(lender), extended_asset( A(1000.00 USD), N(exchange) ), symbol(2,"EXC") );
-
-   lender_ex_usd = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"USD"), N(lender) );
-   lender_ex_btc = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"BTC"), N(lender) );
-
-   wdump((lender_ex_btc.quantity));
-   wdump((lender_ex_usd.quantity));
-
-   BOOST_REQUIRE_EQUAL( lender_ex_usd.quantity, A(500.00 USD) );
-
-   auto lentshares = t.get_lent_shares( N(exchange), symbol(2,"EXC"), N(lender), true );
-   wdump((lentshares));
-   BOOST_REQUIRE_EQUAL( lentshares, 100000 );
-
-   wdump((t.get_market_state( N(exchange), symbol(2,"EXC") ) ));
-
-   t.unlend( N(exchange), N(lender), lentshares, extended_symbol{ symbol(2,"USD"), N(exchange)}, symbol(2,"EXC") );
-
-   lentshares = t.get_lent_shares( N(exchange), symbol(2,"EXC"), N(lender), true );
-   wdump((lentshares));
-
-   wdump((t.get_market_state( N(exchange), symbol(2,"EXC") ) ));
-
-   //BOOST_REQUIRE_EQUAL(expected, actual);
-} FC_LOG_AND_RETHROW() /// test_api_bootstrap
-
-
-
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/unittests/multisig_tests.cpp b/unittests/multisig_tests.cpp
index debf5d95f7c..4e32a60c6a7 100644
--- a/unittests/multisig_tests.cpp
+++ b/unittests/multisig_tests.cpp
@@ -6,9 +6,6 @@
 #include
 #include
-#include
-#include
-
 #include
 #include
@@ -322,7 +319,7 @@ BOOST_FIXTURE_TEST_CASE( propose_with_wrong_requested_auth, eosio_msig_tester )
 BOOST_FIXTURE_TEST_CASE( big_transaction, eosio_msig_tester ) try {
    vector perm = { { N(alice), config::active_name }, { N(bob), config::active_name } };
-   auto wasm = wast_to_wasm( exchange_wast );
+   auto wasm = wast_to_wasm( eosio_token_wast );
 
    variant pretty_trx = fc::mutable_variant_object()
       ("expiration", "2020-01-01T00:30")
@@ -396,7 +393,7 @@ BOOST_FIXTURE_TEST_CASE( update_system_contract_all_approve, eosio_msig_tester )
    set_authority(config::system_account_name, "active", authority(1,
       vector{{get_private_key("eosio", "active").get_public_key(), 1}},
-      vector{{{N(eosio.prods), config::active_name}, 1}}), "owner",
+      vector{{{config::producers_account_name, config::active_name}, 1}}), "owner",
       { { config::system_account_name, "active" } }, { get_private_key( config::system_account_name, "active" ) });
 
    set_producers( {N(alice),N(bob),N(carol)} );
@@ -507,7 +504,7 @@ BOOST_FIXTURE_TEST_CASE( update_system_contract_major_approve, eosio_msig_tester )
    // set up the link between (eosio active) and (eosio.prods active)
    set_authority(config::system_account_name, "active", authority(1,
       vector{{get_private_key("eosio", "active").get_public_key(), 1}},
-      vector{{{N(eosio.prods), config::active_name}, 1}}), "owner",
+      vector{{{config::producers_account_name, config::active_name}, 1}}), "owner",
      { { config::system_account_name, "active" } }, { get_private_key( config::system_account_name, "active" ) });
 
    create_accounts( { N(apple) } );
diff --git a/unittests/whitelist_blacklist_tests.cpp b/unittests/whitelist_blacklist_tests.cpp
index ed359a703a7..1621e2ef916 100644
--- a/unittests/whitelist_blacklist_tests.cpp
+++ b/unittests/whitelist_blacklist_tests.cpp
@@ -45,6 +45,8 @@ class whitelist_blacklist_tester {
             cfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen;
          else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm"))
             cfg.wasm_runtime = chain::wasm_interface::vm_type::wavm;
+         else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wabt"))
+            cfg.wasm_runtime = chain::wasm_interface::vm_type::wabt;
          else
            cfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen;
       }
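For readers checking the hex vectors in the new variants and extend cases in abi_tests.cpp above: a variant value serializes as the varuint32 index of the selected type followed by that type's packed bytes, and trailing "$" extension fields are appended in declaration order only when present (an optional "?$" field adds a one-byte presence flag). A small Python sketch re-deriving the expected strings; the helper names here are illustrative, not fc or EOSIO APIs:

```python
import struct

def pack_varuint32(n: int) -> bytes:
    # LEB128-style unsigned varint; every index and length used below fits in one byte
    out = bytearray()
    while True:
        b = n & 0x7f
        n >>= 7
        out.append(b | (0x80 if n else 0))
        if not n:
            return bytes(out)

def pack_variant(type_index: int, payload: bytes) -> bytes:
    # a variant is the varuint32 index of the selected type, then the packed value
    return pack_varuint32(type_index) + payload

# "v1" is declared as ["int8", "string", "int16"] in the test ABI
assert pack_variant(0, struct.pack("<b", 21)).hex() == "0015"
assert pack_variant(1, pack_varuint32(4) + b"abcd").hex() == "010461626364"
assert pack_variant(2, struct.pack("<h", 3)).hex() == "020300"

# extension fields ("$") are appended only when present
base = bytes([5, 6])                                # {"i0":5,"i1":6}
assert base.hex() == "0506"
ext = base + bytes([7])                             # ..., "i2":7
assert ext.hex() == "050607"
arr = ext + pack_varuint32(3) + bytes([8, 9, 10])   # ..., "a":[8,9,10]
assert arr.hex() == "0506070308090a"
assert (arr + b"\x00").hex() == "0506070308090a00"        # "o":null
assert (arr + b"\x01\x1f").hex() == "0506070308090a011f"  # "o":31
print("all variant/extension encodings match the test vectors")
```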
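Separately, the tests/testUtils.py hunks earlier in this patch thread a bounded deque through Utils.checkOutput so that the last several subprocess results survive for post-mortem reporting. A self-contained sketch of the same pattern (standard library only; the names mirror the patch but the module itself is hypothetical):

```python
import subprocess
from collections import deque

# Bounded history of recent (stdout, stderr, cmd) tuples, like Utils.CheckOutputDeque.
check_output_deque = deque(maxlen=10)

def check_output(cmd):
    assert isinstance(cmd, list)
    popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (output, error) = popen.communicate()
    # Record every invocation; maxlen silently drops the oldest entries.
    check_output_deque.append((output, error, cmd))
    if popen.returncode != 0:
        raise subprocess.CalledProcessError(returncode=popen.returncode, cmd=cmd, output=error)
    return output.decode("utf-8")

print(check_output(["echo", "hello"]).strip())   # -> hello
print(list(check_output_deque)[-1][2])           # -> ['echo', 'hello']
```

Because the deque is bounded, a long test run cannot grow the history without limit, yet a failure handler can still dump the most recent commands and their output.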