From 14546f1251a0ef58d45b22e4c17c4390a7b2d4e4 Mon Sep 17 00:00:00 2001 From: thaipandada Date: Thu, 29 Nov 2018 17:25:46 +0800 Subject: [PATCH 01/21] finish BOS basic functions --- .gitlab-ci.yml | 2 +- CMakeLists.txt | 6 +- CMakeModules/EosioTester.cmake.in | 2 +- CMakeModules/FindCppkafka.cmake | 29 ++ Docker/Dockerfile | 14 +- Docker/README.md | 38 +- Docker/builder/Dockerfile | 19 +- Docker/config.ini | 5 + Docker/dev/Dockerfile | 4 +- README.md | 106 +----- contracts/eosio.system/delegate_bandwidth.cpp | 2 +- contracts/eosio.system/eosio.system.hpp | 1 - contracts/eosio.system/voting.cpp | 11 +- contracts/eosiolib/transaction.h | 22 ++ eosio_build.sh | 35 +- eosio_install.sh | 22 +- libraries/chain/apply_context.cpp | 4 + libraries/chain/block_header.cpp | 10 + libraries/chain/block_header_state.cpp | 4 +- libraries/chain/chain_config.cpp | 6 + libraries/chain/controller.cpp | 155 +++++++- libraries/chain/fork_database.cpp | 2 - .../include/eosio/chain/apply_context.hpp | 1 + libraries/chain/include/eosio/chain/block.hpp | 3 + .../include/eosio/chain/block_header.hpp | 9 +- .../include/eosio/chain/chain_config.hpp | 21 ++ .../chain/include/eosio/chain/config.hpp | 1 + .../chain/include/eosio/chain/config_xos.hpp | 23 ++ .../chain/include/eosio/chain/controller.hpp | 28 +- .../eosio/chain/global_property_object.hpp | 24 ++ .../include/eosio/chain/resource_limits.hpp | 22 +- .../eosio/chain/resource_limits_private.hpp | 25 +- libraries/chain/include/eosio/chain/types.hpp | 2 + libraries/chain/resource_limits.cpp | 48 ++- libraries/chain/wasm_interface.cpp | 125 ++++++- .../testing/include/eosio/testing/tester.hpp | 2 +- plugins/CMakeLists.txt | 2 + .../history_api_plugin/history_api_plugin.cpp | 4 +- plugins/history_plugin/history_plugin.cpp | 93 ++++- .../eosio/history_plugin/history_plugin.hpp | 20 +- plugins/kafka_plugin/CMakeLists.txt | 10 + plugins/kafka_plugin/fifo.h | 85 +++++ plugins/kafka_plugin/kafka.cpp | 185 +++++++++ plugins/kafka_plugin/kafka.hpp | 46 +++ plugins/kafka_plugin/kafka_plugin.cpp | 166 +++++++++ plugins/kafka_plugin/kafka_plugin.hpp | 41 ++ plugins/kafka_plugin/try_handle.cpp | 17 + plugins/kafka_plugin/try_handle.hpp | 9 + plugins/kafka_plugin/types.hpp | 93 +++++ .../include/eosio/net_plugin/protocol.hpp | 15 +- plugins/net_plugin/net_plugin.cpp | 135 ++++++- plugins/notify_plugin/CMakeLists.txt | 7 + plugins/notify_plugin/README.md | 73 ++++ .../eosio/notify_plugin/http_async_client.hpp | 104 ++++++ .../eosio/notify_plugin/notify_plugin.hpp | 33 ++ plugins/notify_plugin/notify_plugin.cpp | 350 ++++++++++++++++++ plugins/producer_plugin/producer_plugin.cpp | 7 +- programs/cleos/httpc.hpp | 1 + programs/cleos/main.cpp | 20 +- programs/nodeos/CMakeLists.txt | 4 + scripts/eosio_build_amazon.sh | 120 ++++++ scripts/eosio_build_centos.sh | 120 ++++++ scripts/eosio_build_darwin.sh | 120 ++++++ scripts/eosio_build_fedora.sh | 120 ++++++ scripts/eosio_build_ubuntu.sh | 120 ++++++ unittests/actiondemo/actiondemo.abi | 99 +++++ unittests/actiondemo/actiondemo.cpp | 106 ++++++ unittests/actiondemo/actiondemo.hpp | 50 +++ unittests/actiondemo/test.py | 223 +++++++++++ unittests/database_gmr_blklst_tests.cpp | 309 ++++++++++++++++ unittests/database_tests.cpp | 3 +- unittests/gmr_test.cpp | 234 ++++++++++++ 72 files changed, 3785 insertions(+), 192 deletions(-) create mode 100644 CMakeModules/FindCppkafka.cmake create mode 100644 libraries/chain/include/eosio/chain/config_xos.hpp create mode 100644 plugins/kafka_plugin/CMakeLists.txt create mode 100644 
plugins/kafka_plugin/fifo.h create mode 100644 plugins/kafka_plugin/kafka.cpp create mode 100644 plugins/kafka_plugin/kafka.hpp create mode 100644 plugins/kafka_plugin/kafka_plugin.cpp create mode 100644 plugins/kafka_plugin/kafka_plugin.hpp create mode 100644 plugins/kafka_plugin/try_handle.cpp create mode 100644 plugins/kafka_plugin/try_handle.hpp create mode 100644 plugins/kafka_plugin/types.hpp create mode 100644 plugins/notify_plugin/CMakeLists.txt create mode 100644 plugins/notify_plugin/README.md create mode 100644 plugins/notify_plugin/include/eosio/notify_plugin/http_async_client.hpp create mode 100644 plugins/notify_plugin/include/eosio/notify_plugin/notify_plugin.hpp create mode 100644 plugins/notify_plugin/notify_plugin.cpp create mode 100644 unittests/actiondemo/actiondemo.abi create mode 100644 unittests/actiondemo/actiondemo.cpp create mode 100644 unittests/actiondemo/actiondemo.hpp create mode 100644 unittests/actiondemo/test.py create mode 100644 unittests/database_gmr_blklst_tests.cpp create mode 100644 unittests/gmr_test.cpp diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a51b6e9564e..622a46f2642 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -4,4 +4,4 @@ build: script: - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY - docker build -t $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA -f Docker/Dockerfile . - - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA + - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index 360d6c973ab..b1511b82a0d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -28,14 +28,14 @@ include( SetupTargetMacros ) include( InstallDirectoryPermissions ) include( MASSigning ) -set( BLOCKCHAIN_NAME "EOSIO" ) +set( BLOCKCHAIN_NAME "BOS" ) set( CMAKE_CXX_STANDARD 14 ) set( CMAKE_CXX_EXTENSIONS ON ) set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) -set(VERSION_MINOR 4) -set(VERSION_PATCH 4) +set(VERSION_MINOR 0) +set(VERSION_PATCH 1) set( CLI_CLIENT_EXECUTABLE_NAME cleos ) set( NODE_EXECUTABLE_NAME nodeos ) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index 741ee5f0e84..ac2bc0221fa 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -97,8 +97,8 @@ macro(add_eosio_test test_name) ${liblogging} ${libchainbase} ${libbuiltins} - ${GMP_LIBRARIES} ${libsecp256k1} + ${GMP_LIBRARIES} LLVMX86Disassembler LLVMX86AsmParser diff --git a/CMakeModules/FindCppkafka.cmake b/CMakeModules/FindCppkafka.cmake new file mode 100644 index 00000000000..aa8bc82ba85 --- /dev/null +++ b/CMakeModules/FindCppkafka.cmake @@ -0,0 +1,29 @@ +# Override default CMAKE_FIND_LIBRARY_SUFFIXES +if (CPPKAFKA_SHARED_LIB) + set(CPPKAFKA_SUFFIX so) +else() + set(CPPKAFKA_SUFFIX a) +endif() +message(STATUS "Cppkafka finding .${CPPKAFKA_SUFFIX} library") + +FIND_PATH( + CPPKAFKA_INCLUDE_DIR cppkafka.h + PATH "/usr/local" + PATH_SUFFIXES "" "cppkafka") +MARK_AS_ADVANCED(CPPKAFKA_INCLUDE_DIR) + +SET(CPPKAFKA_INCLUDE_DIR ${CPPKAFKA_INCLUDE_DIR}) + +FIND_LIBRARY( + CPPKAFKA_LIBRARY + NAMES cppkafka.${CPPKAFKA_SUFFIX} libcppkafka.${CPPKAFKA_SUFFIX} + HINTS ${CPPKAFKA_INCLUDE_DIR}/.. 
+ PATH_SUFFIXES lib${LIB_SUFFIX}) +MARK_AS_ADVANCED(CPPKAFKA_LIBRARY) + +SET(CPPKAFKA_LIBRARY ${CPPKAFKA_LIBRARY}) +message(STATUS "Cppkafka found ${CPPKAFKA_LIBRARY}") + +include(FindPackageHandleStandardArgs) +SET(_CPPKAFKA_REQUIRED_VARS CPPKAFKA_INCLUDE_DIR CPPKAFKA_LIBRARY) +FIND_PACKAGE_HANDLE_STANDARD_ARGS(Cppkafka DEFAULT_MSG ${_CPPKAFKA_REQUIRED_VARS}) diff --git a/Docker/Dockerfile b/Docker/Dockerfile index 24dd447ed75..67f7714c894 100644 --- a/Docker/Dockerfile +++ b/Docker/Dockerfile @@ -1,11 +1,15 @@ -FROM eosio/builder as builder +FROM boscore/builder as builder ARG branch=master ARG symbol=SYS -RUN git clone -b $branch https://github.com/EOSIO/eos.git --recursive \ - && cd eos && echo "$branch:$(git rev-parse HEAD)" > /etc/eosio-version \ +ENV OPENSSL_ROOT_DIR /usr/include/openssl + + +RUN git clone -b $branch https://github.com/boscore/bos.git --recursive \ + && cd bos && echo "$branch:$(git rev-parse HEAD)" > /etc/eosio-version \ && cmake -H. -B"/tmp/build" -GNinja -DCMAKE_BUILD_TYPE=Release -DWASM_ROOT=/opt/wasm -DCMAKE_CXX_COMPILER=clang++ \ -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/tmp/build -DBUILD_MONGO_DB_PLUGIN=true -DCORE_SYMBOL_NAME=$symbol \ + -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" \ && cmake --build /tmp/build --target install && rm /tmp/build/bin/eosiocpp @@ -15,9 +19,9 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install openssl COPY --from=builder /usr/local/lib/* /usr/local/lib/ COPY --from=builder /tmp/build/bin /opt/eosio/bin COPY --from=builder /tmp/build/contracts /contracts -COPY --from=builder /eos/Docker/config.ini / +COPY --from=builder /bos/Docker/config.ini / COPY --from=builder /etc/eosio-version /etc -COPY --from=builder /eos/Docker/nodeosd.sh /opt/eosio/bin/nodeosd.sh +COPY --from=builder /bos/Docker/nodeosd.sh /opt/eosio/bin/nodeosd.sh ENV EOSIO_ROOT=/opt/eosio RUN chmod +x /opt/eosio/bin/nodeosd.sh ENV LD_LIBRARY_PATH /usr/local/lib diff --git a/Docker/README.md b/Docker/README.md index db0340e3116..bbaabec7438 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -1,6 +1,6 @@ # Run in docker -Simple and fast setup of EOS.IO on Docker is also available. +Simple and fast setup of BOSCore on Docker is also available. ## Install Dependencies @@ -12,30 +12,30 @@ Simple and fast setup of EOS.IO on Docker is also available. - At least 7GB RAM (Docker -> Preferences -> Advanced -> Memory -> 7GB or above) - If the build below fails, make sure you've adjusted Docker Memory settings and try again. -## Build eos image +## Build BOSCore image ```bash -git clone https://github.com/EOSIO/eos.git --recursive --depth 1 -cd eos/Docker -docker build . -t eosio/eos +git clone https://github.com/boscore/bos.git --recursive --depth 1 +cd bos/Docker +docker build . -t boscore/bos ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.4.4 tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.0.1 tag, you could do the following: ```bash -docker build -t eosio/eos:v1.4.4 --build-arg branch=v1.4.4 . +docker build -t boscore/bos:v1.0.1 --build-arg branch=v1.0.1 . ``` By default, the symbol in eosio.system is set to SYS. 
You can override this using the symbol argument while building the docker image. ```bash -docker build -t eosio/eos --build-arg symbol= . +docker build -t boscore/bos --build-arg symbol= . ``` ## Start nodeos docker container only ```bash -docker run --name nodeos -p 8888:8888 -p 9876:9876 -t eosio/eos nodeosd.sh -e --http-alias=nodeos:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 arg1 arg2 +docker run --name nodeos -p 8888:8888 -p 9876:9876 -t boscore/bos nodeosd.sh -e --http-alias=nodeos:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 arg1 arg2 ``` By default, all data is persisted in a docker volume. It can be deleted if the data is outdated or corrupted: @@ -49,7 +49,7 @@ $ docker volume rm fdc265730a4f697346fa8b078c176e315b959e79365fc9cbd11f090ea0cb5 Alternately, you can directly mount host directory into the container ```bash -docker run --name nodeos -v /path-to-data-dir:/opt/eosio/bin/data-dir -p 8888:8888 -p 9876:9876 -t eosio/eos nodeosd.sh -e --http-alias=nodeos:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 arg1 arg2 +docker run --name nodeos -v /path-to-data-dir:/opt/eosio/bin/data-dir -p 8888:8888 -p 9876:9876 -t boscore/bos nodeosd.sh -e --http-alias=nodeos:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 arg1 arg2 ``` ## Get chain info @@ -92,13 +92,13 @@ docker-compose stop keosd ### Develop/Build custom contracts -Due to the fact that the eosio/eos image does not contain the required dependencies for contract development (this is by design, to keep the image size small), you will need to utilize the eosio/eos-dev image. This image contains both the required binaries and dependencies to build contracts using eosiocpp. +Due to the fact that the boscore/bos image does not contain the required dependencies for contract development (this is by design, to keep the image size small), you will need to utilize the boscore/bos-dev image. This image contains both the required binaries and dependencies to build contracts using eosiocpp. -You can either use the image available on [Docker Hub](https://hub.docker.com/r/eosio/eos-dev/) or navigate into the dev folder and build the image manually. +You can either use the image available on [Docker Hub](https://hub.docker.com/r/boscore/bos-dev/) or navigate into the dev folder and build the image manually. ```bash cd dev -docker build -t eosio/eos-dev . +docker build -t boscore/bos-dev . ``` ### Change default configuration @@ -133,7 +133,7 @@ docker volume rm keosd-data-volume ### Docker Hub -Docker Hub image available from [docker hub](https://hub.docker.com/r/eosio/eos/). +Docker Hub image available from [docker hub](https://hub.docker.com/r/boscore/bos/). 
Create a new `docker-compose.yaml` file with the content below ```bash @@ -141,7 +141,7 @@ version: "3" services: nodeosd: - image: eosio/eos:latest + image: boscore/bos:latest command: /opt/eosio/bin/nodeosd.sh --data-dir /opt/eosio/bin/data-dir -e --http-alias=nodeosd:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 hostname: nodeosd ports: @@ -153,7 +153,7 @@ services: - nodeos-data-volume:/opt/eosio/bin/data-dir keosd: - image: eosio/eos:latest + image: boscore/bos:latest command: /opt/eosio/bin/keosd --wallet-dir /opt/eosio/bin/data-dir --http-server-address=127.0.0.1:8900 --http-alias=localhost:8900 --http-alias=keosd:8900 hostname: keosd links: @@ -169,13 +169,13 @@ volumes: *NOTE:* the default version is the latest, you can change it to what you want -run `docker pull eosio/eos:latest` +run `docker pull boscore/bos:latest` run `docker-compose up` -### EOSIO Testnet +### BOSCore Testnet -We can easily set up a EOSIO local testnet using docker images. Just run the following commands: +We can easily set up a BOSCore local testnet using docker images. Just run the following commands: Note: if you want to use the mongo db plugin, you have to enable it in your `data-dir/config.ini` first. diff --git a/Docker/builder/Dockerfile b/Docker/builder/Dockerfile index cac09937cd0..74677b701c4 100644 --- a/Docker/builder/Dockerfile +++ b/Docker/builder/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 -LABEL author="xiaobo " maintainer="Xiaobo Huang-Ming Huang " version="0.1.1" \ - description="This is a base image for building eosio/eos" +LABEL author="xiaobo " maintainer="Xiaobo Huang-Ming Huang Winlin " version="0.1.2" \ + description="This is a base image for building boscore/bos" RUN echo 'APT::Install-Recommends 0;' >> /etc/apt/apt.conf.d/01norecommends \ && echo 'APT::Install-Suggests 0;' >> /etc/apt/apt.conf.d/01norecommends \ @@ -56,3 +56,18 @@ RUN git clone --depth 1 -b releases/v3.3 https://github.com/mongodb/mongo-cxx-dr && make -j$(nproc) \ && make install \ && cd ../../ && rm -rf mongo-cxx-driver + +RUN git clone --depth 1 -b v0.11.6 https://github.com/boscore/librdkafka.git \ + && cd librdkafka/ \ + && cmake -H. -B_cmake_build \ + && cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build \ + && cd _cmake_build \ + && make install \ + && cd ../../ && rm -rf librdkafka + +RUN git clone --depth 1 -b 0.2 https://github.com/boscore/cppkafka.git \ + && cd cppkafka/ \ + && mkdir build && cd build \ + && cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. \ + && make install \ + && cd ../../ && rm -rf cppkafka diff --git a/Docker/config.ini b/Docker/config.ini index d9871858f19..71ae5c6c0ed 100644 --- a/Docker/config.ini +++ b/Docker/config.ini @@ -77,6 +77,9 @@ access-control-allow-credentials = false # The actual host:port used to listen for incoming p2p connections. (eosio::net_plugin) p2p-listen-endpoint = 0.0.0.0:9876 +#The p2p-discoverable is used to enable or disable p2p network self-discovery (eosio::net_plugin) +#p2p-discoverable= + # An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint. 
(eosio::net_plugin)
# p2p-server-address =

@@ -158,3 +161,5 @@ txn-reference-block-lag = 0
 # plugin =
 plugin = eosio::chain_api_plugin
 plugin = eosio::history_api_plugin
+# enable this option to produce blocks
+#plugin = eosio::producer_plugin
diff --git a/Docker/dev/Dockerfile b/Docker/dev/Dockerfile
index f2dea74ac6c..cd79c5e0e2d 100644
--- a/Docker/dev/Dockerfile
+++ b/Docker/dev/Dockerfile
@@ -1,8 +1,8 @@
-FROM eosio/builder
+FROM boscore/builder
 ARG branch=master
 ARG symbol=SYS

-RUN git clone -b $branch https://github.com/EOSIO/eos.git --recursive \
+RUN git clone -b $branch https://github.com/boscore/bos.git --recursive \
     && cd eos && echo "$branch:$(git rev-parse HEAD)" > /etc/eosio-version \
     && cmake -H. -B"/opt/eosio" -GNinja -DCMAKE_BUILD_TYPE=Release -DWASM_ROOT=/opt/wasm -DCMAKE_CXX_COMPILER=clang++ \
     -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/opt/eosio -DBUILD_MONGO_DB_PLUGIN=true -DCORE_SYMBOL_NAME=$symbol \
diff --git a/README.md b/README.md
index 2a048d5b973..d1a66f70a0e 100644
--- a/README.md
+++ b/README.md
@@ -1,95 +1,29 @@
+# BOSCore - A more usable chain, born for DApps.
-# EOSIO - The Most Powerful Infrastructure for Decentralized Applications
+## BOSCore Version: v1.0.1
+### Basic EOSIO Version: v1.4.4
-[![Build status](https://badge.buildkite.com/370fe5c79410f7d695e4e34c500b4e86e3ac021c6b1f739e20.svg?branch=master)](https://buildkite.com/EOSIO/eosio)
+# Background
+The advent of EOS has brought new imagination to blockchain. In the few months since the mainnet launched, the software has gone through dozens of version upgrades: stability has improved greatly, new features have landed step by step, and block producer teams have taken an active part in building the EOSIO ecosystem. Even more exciting, EOS keeps attracting development teams; hundreds of DApps already run on the EOS mainnet, with transaction volume and market capitalization far beyond Ethereum's, and the room for growth keeps widening.
+As the EOS mainnet has evolved, we have also seen it deviate from expectations in places. For the most competitive third-generation public chain, everyone hopes to see more and richer applications running on EOS, with developers choosing it as the preferred platform for their applications. But the current EOS resource model imposes high usage costs, including creating accounts for users and the considerable expense of deploying and operating a DApp. IBC, the key technology for the millions of TPS promised in the white paper, has never been pushed forward, and the mainnet has repeatedly run short of CPU resources, which makes cross-chain communication all the more urgent. Moreover, because EOSIO uses a pipelined DPoS consensus mechanism, a transaction needs nearly three minutes before it is guaranteed irreversible. Although this is a great improvement over Bitcoin and Ethereum, it still limits EOS application scenarios severely: fast payment can only focus on small transfers, while large transfers must wait long enough to become irreversible, constraining the payment experience both on-chain and off-chain.
+Beyond the issues above, many other improvement ideas are being actively discussed in our community. We therefore believe more experiments should be carried out on the basis of EOS, drawing more developers and teams into building the EOSIO ecosystem and helping blockchain find its footing in different industries and scenarios. As an EOS side chain maintained entirely by the community, BOS inherits the good features of EOS while attempting more, and will feed proven new features and functions back into the EOSIO ecosystem.
-Welcome to the EOSIO source code repository! This software enables businesses to rapidly build and deploy high-performance and high-security blockchain-based applications.
+# Overview
+BOS is committed to providing users with blockchain services that are easy to enter and easy to use, offering friendlier infrastructure for DApp operation, working to support richer application scenarios, and actively experimenting toward a great prosperity of DApps. Besides technical improvements, BOS will also experiment in other directions. For example, to raise voter participation, oracle technology can be used to reward accounts that meet clearly defined rules; BP rewards on BOS will be adjusted according to metrics such as the number of on-chain DApps, TPS, market capitalization, and circulation, encouraging every BP to provide more resources for the ecosystem; and a resolution passed by community referendum will be codified as far as possible, reducing the human factor, putting the process on-chain, and keeping it fair and transparent.
+The BOS code is contributed and maintained entirely by the community; every participant in the ecosystem can submit code or suggestions, with processes modeled on existing open-source software, such as PEP (Python Enhancement Proposals).
+To encourage the growth of DApps on BOS, the BOS Foundation will provide low-cost resource staking services via token swap for DApps on the chain, reducing their early operating costs, and will regularly reward contributing developers and feature validators with BOS, building a community that grows by mutual reinforcement.
-Some of the groundbreaking features of EOSIO include:
+## Resources
+1. [Website](https://boscore.io)
+2. [Developer Telegram Group](https://t.me/BOSCoreProject)
-1. Free Rate Limited Transactions
-1. Low Latency Block confirmation (0.5 seconds)
-1. Low-overhead Byzantine Fault Tolerant Finality
-1. Designed for optional high-overhead, low-latency BFT finality
-1. Smart contract platform powered by Web Assembly
-1. Designed for Sparse Header Light Client Validation
-1. Scheduled Recurring Transactions
-1. Time Delay Security
-1. Hierarchical Role Based Permissions
-1. Support for Biometric Hardware Secured Keys (e.g. Apple Secure Enclave)
-1. Designed for Parallel Execution of Context Free Validation Logic
-1. Designed for Inter Blockchain Communication
+## Getting Started
+1. Build directly from source: `bash ./eosio_build.sh`
+2. Deploy with Docker; see [Docker](./Docker/README.md)
-EOSIO is released under the open source MIT license and is offered “AS IS” without warranty of any kind, express or implied. Any security provided by the EOSIO software depends in part on how it is used, configured, and deployed. EOSIO is built upon many third-party libraries such as Binaryen (Apache License) and WAVM (BSD 3-clause) which are also provided “AS IS” without warranty of any kind. Without limiting the generality of the foregoing, Block.one makes no representation or guarantee that EOSIO or any third-party libraries will perform as intended or will be free of errors, bugs or faulty code. Both may fail in large or small ways that could completely or partially limit functionality or compromise computer systems. If you use or implement EOSIO, you do so at your own risk. In no event will Block.one be liable to any party for any damages whatsoever, even if it had been advised of the possibility of damage.
+BOSCore is an extension of EOSIO technology, so the EOSIO documentation is also worth consulting:
-Block.one is neither launching nor operating any initial public blockchains based upon the EOSIO software. This release refers only to version 1.0 of our open source software. We caution those who wish to use blockchains built on EOSIO to carefully vet the companies and organizations launching blockchains based on EOSIO before disclosing any private keys to their derivative software.
+[Getting Started](https://developers.eos.io/eosio-nodeos/docs/overview-1)
-There is no public testnet running currently.
+[EOSIO Developer Portal](https://developers.eos.io).
-**If you have previously installed EOSIO, please run the `eosio_uninstall` script (it is in the directory where you cloned EOSIO) before downloading and using the binary releases.**
-
-#### Mac OS X Brew Install
-```sh
-$ brew tap eosio/eosio
-$ brew install eosio
-```
-#### Mac OS X Brew Uninstall
-```sh
-$ brew remove eosio
-```
-#### Ubuntu 18.04 Debian Package Install
-```sh
-$ wget https://github.com/eosio/eos/releases/download/v1.4.4/eosio_1.4.4-1-ubuntu-18.04_amd64.deb
-$ sudo apt install ./eosio_1.4.4-1-ubuntu-18.04_amd64.deb
-```
-#### Ubuntu 16.04 Debian Package Install
-```sh
-$ wget https://github.com/eosio/eos/releases/download/v1.4.4/eosio_1.4.4-1-ubuntu-16.04_amd64.deb
-$ sudo apt install ./eosio_1.4.4-1-ubuntu-16.04_amd64.deb
-```
-#### Debian Package Uninstall
-```sh
-$ sudo apt remove eosio
-```
-#### Centos RPM Package Install
-```sh
-$ wget https://github.com/eosio/eos/releases/download/v1.4.4/eosio-1.4.4-1.el7.x86_64.rpm
-$ sudo yum install ./eosio-1.4.4-1.el7.x86_64.rpm
-```
-#### Centos RPM Package Uninstall
-```sh
-$ sudo yum remove eosio.cdt
-```
-#### Fedora RPM Package Install
-```sh
-$ wget https://github.com/eosio/eos/releases/download/v1.4.4/eosio-1.4.4-1.fc27.x86_64.rpm
-$ sudo yum install ./eosio-1.4.4-1.fc27.x86_64.rpm
-```
-#### Fedora RPM Package Uninstall
-```sh
-$ sudo yum remove eosio.cdt
-```
-
-## Supported Operating Systems
-EOSIO currently supports the following operating systems:
-1. Amazon 2017.09 and higher
-2. Centos 7
-3. Fedora 25 and higher (Fedora 27 recommended)
-4. Mint 18
-5. Ubuntu 16.04 (Ubuntu 16.10 recommended)
-6. Ubuntu 18.04
-7. MacOS Darwin 10.12 and higher (MacOS 10.13.x recommended)
-
-## Resources
-1. [Website](https://eos.io)
-1. [Blog](https://medium.com/eosio)
-1. [Developer Portal](https://developers.eos.io)
-1. [StackExchange for Q&A](https://eosio.stackexchange.com/)
-1. [Community Telegram Group](https://t.me/EOSProject)
-1. [Developer Telegram Group](https://t.me/joinchat/EaEnSUPktgfoI-XPfMYtcQ)
-1. [White Paper](https://github.com/EOSIO/Documentation/blob/master/TechnicalWhitePaper.md)
-1. [Roadmap](https://github.com/EOSIO/Documentation/blob/master/Roadmap.md)
-
-## Getting Started
-Instructions detailing the process of getting the software, building it, running a simple test network that produces blocks, account creation and uploading a sample contract to the blockchain can be found in [Getting Started](https://developers.eos.io/eosio-nodeos/docs/overview-1) on the [EOSIO Developer Portal](https://developers.eos.io).
diff --git a/contracts/eosio.system/delegate_bandwidth.cpp b/contracts/eosio.system/delegate_bandwidth.cpp
index 1e190e9fe3c..95a40781530 100644
--- a/contracts/eosio.system/delegate_bandwidth.cpp
+++ b/contracts/eosio.system/delegate_bandwidth.cpp
@@ -205,7 +205,7 @@ namespace eosiosystem {
          const int64_t max_claimable = 100'000'000'0000ll;
          const int64_t claimable = int64_t(max_claimable * double(now()-base_time) / (10*seconds_per_year) );

-         eosio_assert( max_claimable - claimable <= stake, "b1 can only claim their tokens over 10 years" );
+         eosio_assert( max_claimable - claimable <= stake, "bosbosbosbos can only claim their tokens over 10 years" );
      }

      void system_contract::changebw( account_name from, account_name receiver,
diff --git a/contracts/eosio.system/eosio.system.hpp b/contracts/eosio.system/eosio.system.hpp
index a33238a1eaa..66964e39659 100644
--- a/contracts/eosio.system/eosio.system.hpp
+++ b/contracts/eosio.system/eosio.system.hpp
@@ -172,7 +172,6 @@ namespace eosiosystem {
          void undelegatebw( account_name from, account_name receiver,
                             asset unstake_net_quantity, asset unstake_cpu_quantity );

-
         /**
          * Increases receiver's ram quota based upon current price and quantity of
          * tokens provided. An inline transfer from receiver to system contract of
diff --git a/contracts/eosio.system/voting.cpp b/contracts/eosio.system/voting.cpp
index 166f1707cd7..feeb53fc3d8 100644
--- a/contracts/eosio.system/voting.cpp
+++ b/contracts/eosio.system/voting.cpp
@@ -86,8 +86,15 @@ namespace eosiosystem {
         return;
      }

-      /// sort by producer name
-      std::sort( top_producers.begin(), top_producers.end() );
+      /// sort by producer location
+      struct {
+         bool operator()(std::pair<eosio::producer_key, uint16_t> a, std::pair<eosio::producer_key, uint16_t> b) const
+         {
+            return a.second == b.second ? a.first < b.first : a.second < b.second;
+         }
+      } custom_compare;
+      std::sort( top_producers.begin(), top_producers.end(), custom_compare );

      std::vector<eosio::producer_key> producers;
diff --git a/contracts/eosiolib/transaction.h b/contracts/eosiolib/transaction.h
index dd7c05ded17..db115ca27e1 100644
--- a/contracts/eosiolib/transaction.h
+++ b/contracts/eosiolib/transaction.h
@@ -94,6 +94,28 @@ extern "C" {
    */
   size_t transaction_size();

+   /**
+    * Get the id of the currently executing transaction
+    *
+    * @param id : out parameter that receives the transaction id
+    */
+   void get_transaction_id( transaction_id_type* id );
+
+   /**
+    * Get the globally unique sequence number of the currently executing action
+    *
+    * @param seq : out parameter that receives the sequence number
+    */
+   void get_action_sequence(uint64_t* seq);
+
+   /**
+    * Get the producer's signature for the action time seed
+    * @param sig : memory buffer that receives the signature
+    * @param siglen : size of the memory buffer
+    * @return : number of valid bytes written to the buffer
+    */
+   int bpsig_action_time_seed( const char* sig, size_t siglen );
+
   /**
    * Gets the block number used for TAPOS on the currently executing transaction.
    *
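For a sense of how a contract would consume the three intrinsics added above, here is a hypothetical action sketch (not part of this patch). It assumes `transaction_id_type` is the eosiolib checksum typedef this header already references, and that the intrinsics are exported to WASM by the matching `wasm_interface` registrations later in this patch; the account and action names are made up.

```cpp
// Hypothetical contract-side sketch exercising the new BOS intrinsics.
#include <eosiolib/eosio.hpp>
#include <eosiolib/transaction.h>

class demo : public eosio::contract {
public:
   using contract::contract;

   /// @abi action
   void probe() {
      transaction_id_type txid;      // id of the currently executing transaction
      get_transaction_id( &txid );

      uint64_t seq = 0;              // globally unique action sequence number
      get_action_sequence( &seq );
      eosio::print( "action sequence: ", seq, "\n" );

      char seed[64];                 // producer signature over the action time seed
      int valid = bpsig_action_time_seed( seed, sizeof(seed) );
      eosio::print( "seed bytes: ", valid, "\n" );
   }
};

EOSIO_ABI( demo, (probe) )
```

The `bpsig_action_time_seed` buffer length is caller-chosen; the return value reports how many bytes the producing node actually wrote, which is why the sketch prints it rather than assuming a fixed signature size.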
-d "${SOURCE_DIR}/.git" ]; then printf "\\n\\tThis build script only works with sources cloned from git\\n" - printf "\\tPlease clone a new eos directory with 'git clone https://github.com/EOSIO/eos --recursive'\\n" - printf "\\tSee the wiki for instructions: https://github.com/EOSIO/eos/wiki\\n" + printf "\\tPlease clone a new bos directory with 'git clone https://github.com/boscore/bos --recursive'\\n" + printf "\\tSee the wiki for instructions: https://github.com/boscore/bos/wiki\\n" exit 1 fi @@ -238,7 +238,7 @@ . "$FILE" - printf "\\n\\n>>>>>>>> ALL dependencies sucessfully found or installed . Installing EOSIO\\n\\n" + printf "\\n\\n>>>>>>>> ALL dependencies sucessfully found or installed . Installing BOSCore\\n\\n" printf ">>>>>>>> CMAKE_BUILD_TYPE=%s\\n" "${CMAKE_BUILD_TYPE}" printf ">>>>>>>> ENABLE_COVERAGE_TESTING=%s\\n" "${ENABLE_COVERAGE_TESTING}" printf ">>>>>>>> DOXYGEN=%s\\n\\n" "${DOXYGEN}" @@ -267,41 +267,42 @@ -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ -DCMAKE_INSTALL_PREFIX="/usr/local/eosio" ${LOCAL_CMAKE_FLAGS} "${SOURCE_DIR}" then - printf "\\n\\t>>>>>>>>>>>>>>>>>>>> CMAKE building EOSIO has exited with the above error.\\n\\n" + printf "\\n\\t>>>>>>>>>>>>>>>>>>>> CMAKE building BOSCore has exited with the above error.\\n\\n" exit -1 fi if [ "${START_MAKE}" == "false" ]; then - printf "\\n\\t>>>>>>>>>>>>>>>>>>>> EOSIO has been successfully configured but not yet built.\\n\\n" + printf "\\n\\t>>>>>>>>>>>>>>>>>>>> BOSCore has been successfully configured but not yet built.\\n\\n" exit 0 fi if [ -z ${JOBS} ]; then JOBS=$CPU_CORE; fi # Future proofing: Ensure $JOBS is set (usually set in scripts/eosio_build_*.sh scripts) if ! make -j"${JOBS}" then - printf "\\n\\t>>>>>>>>>>>>>>>>>>>> MAKE building EOSIO has exited with the above error.\\n\\n" + printf "\\n\\t>>>>>>>>>>>>>>>>>>>> MAKE building BOSCore has exited with the above error.\\n\\n" exit -1 fi TIME_END=$(( $(date -u +%s) - ${TIME_BEGIN} )) - printf "\n\n${bldred}\t _______ _______ _______ _________ _______\n" - printf '\t( ____ \( ___ )( ____ \\\\__ __/( ___ )\n' - printf "\t| ( \/| ( ) || ( \/ ) ( | ( ) |\n" - printf "\t| (__ | | | || (_____ | | | | | |\n" - printf "\t| __) | | | |(_____ ) | | | | | |\n" - printf "\t| ( | | | | ) | | | | | | |\n" - printf "\t| (____/\| (___) |/\____) |___) (___| (___) |\n" - printf "\t(_______/(_______)\_______)\_______/(_______)\n${txtrst}" + printf "\n\n${bldred}\t ______ _______ _______ _______ _______ _______ _______ \n" + printf "\t( ___ \ ( ___ )( ____ \( ____ \( ___ )( ____ )( ____ \ \n" + printf "\t| ( ) )| ( ) || ( \/| ( \/| ( ) || ( )|| ( \/\n" + printf "\t| (__/ / | | | || (_____ | | | | | || (____)|| (__ \n" + printf "\t| __ ( | | | |(_____ )| | | | | || __)| __) \n" + printf "\t| ( \ \ | | | | ) || | | | | || (\ ( | ( \n" + printf "\t| )___) )| (___) |/\____) || (____/\| (___) || ) \ \__| (____/\ \n" + printf "\t|/ \___/ (_______)\_______)(_______/(_______)|/ \__/(_______/\n\n${txtrst}" - printf "\\n\\tEOSIO has been successfully built. %02d:%02d:%02d\\n\\n" $(($TIME_END/3600)) $(($TIME_END%3600/60)) $(($TIME_END%60)) + printf "\\n\\tBOSCore has been successfully built. 
diff --git a/eosio_install.sh b/eosio_install.sh
index 9ed195df7d0..fcb6e3c81d7 100755
--- a/eosio_install.sh
+++ b/eosio_install.sh
@@ -103,18 +103,20 @@ fi
    install_symlinks
    create_cmake_symlink "eosio-config.cmake"

-   printf "\n\n${bldred}\t _______ _______ _______ _________ _______\n"
-   printf '\t( ____ \( ___ )( ____ \\\\__ __/( ___ )\n'
-   printf "\t| ( \/| ( ) || ( \/ ) ( | ( ) |\n"
-   printf "\t| (__ | | | || (_____ | | | | | |\n"
-   printf "\t| __) | | | |(_____ ) | | | | | |\n"
-   printf "\t| ( | | | | ) | | | | | | |\n"
-   printf "\t| (____/\| (___) |/\____) |___) (___| (___) |\n"
-   printf "\t(_______/(_______)\_______)\_______/(_______)\n${txtrst}"
+   printf "\n\n${bldred}\t ______ _______ _______ _______ _______ _______ _______ \n"
+   printf "\t( ___ \ ( ___ )( ____ \( ____ \( ___ )( ____ )( ____ \ \n"
+   printf "\t| ( ) )| ( ) || ( \/| ( \/| ( ) || ( )|| ( \/\n"
+   printf "\t| (__/ / | | | || (_____ | | | | | || (____)|| (__ \n"
+   printf "\t| __ ( | | | |(_____ )| | | | | || __)| __) \n"
+   printf "\t| ( \ \ | | | | ) || | | | | || (\ ( | ( \n"
+   printf "\t| )___) )| (___) |/\____) || (____/\| (___) || ) \ \__| (____/\ \n"
+   printf "\t|/ \___/ (_______)\_______)(_______/(_______)|/ \__/(_______/\n\n${txtrst}"

    printf "\\tFor more information:\\n"
-   printf "\\tEOSIO website: https://eos.io\\n"
-   printf "\\tEOSIO Telegram channel @ https://t.me/EOSProject\\n"
+   printf "\\tBOSCore website: https://boscore.io\\n"
+   printf "\\tBOSCore Telegram channel @ https://t.me/BOSCoreProject\\n"
+   printf "\\tBOSCore wiki: https://github.com/boscore/bos/wiki\\n"
    printf "\\tEOSIO resources: https://eos.io/resources/\\n"
    printf "\\tEOSIO Stack Exchange: https://eosio.stackexchange.com\\n"
    printf "\\tEOSIO wiki: https://github.com/EOSIO/eos/wiki\\n\\n\\n"
+
diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp
index de1450013d8..07ab384eeff 100644
--- a/libraries/chain/apply_context.cpp
+++ b/libraries/chain/apply_context.cpp
@@ -44,6 +44,9 @@ void apply_context::exec_one( action_trace& trace )
    trace.act = act;
    trace.context_free = context_free;

+   const auto& p = control.get_dynamic_global_properties();
+   global_action_sequence = p.global_action_sequence + 1;
+
    const auto& cfg = control.get_global_properties().configuration;
    try {
       try {
@@ -79,6 +82,7 @@ void apply_context::exec_one( action_trace& trace )

    r.global_sequence  = next_global_sequence();
    r.recv_sequence    = next_recv_sequence( receiver );
+   global_action_sequence = 0;

    const auto& account_sequence = db.get<account_sequence_object, by_name>(act.account);
    r.code_sequence = account_sequence.code_sequence; // could be modified by action execution above
diff --git a/libraries/chain/block_header.cpp b/libraries/chain/block_header.cpp
index 8ba95b705e2..b73338b3ca2 100644
--- a/libraries/chain/block_header.cpp
+++ b/libraries/chain/block_header.cpp
@@ -28,5 +28,15 @@ namespace eosio { namespace chain {
       return result;
    }

+   void block_header::set_block_extensions_mroot(digest_type& mroot)
+   {
+      if (header_extensions.size() < 1)
+         header_extensions.emplace_back();
+
+      header_extensions[0].first = static_cast<uint16_t>(block_header_extensions_type::block_extensions_mroot);
+      header_extensions[0].second.resize(mroot.data_size());
+      std::copy(mroot.data(), mroot.data() + mroot.data_size(), header_extensions[0].second.data());
+   }
+
 } }
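The pattern this function establishes: each block's `block_extensions` are hashed leaf by leaf, their merkle root is written into `header_extensions[0]`, and `set_ext_merkle()` (added to controller.cpp further down) fills that slot when a block is finalized. A minimal, hypothetical checker built only from calls visible in this patch (`merkle`, `digest_type::hash`, the `block_extensions_mroot` slot) could look like the following; the function name is made up for illustration.

```cpp
// Hypothetical validation sketch: recompute the block-extensions merkle
// root the same way controller_impl::set_ext_merkle() builds it, and
// compare it against header extension slot 0.
#include <eosio/chain/block.hpp>
#include <eosio/chain/merkle.hpp>
#include <algorithm>

using namespace eosio::chain;

bool block_extensions_mroot_matches( const signed_block& b ) {
   vector<digest_type> ext_digests;
   ext_digests.reserve( b.block_extensions.size() );
   for( const auto& ext : b.block_extensions )
      ext_digests.emplace_back( digest_type::hash( ext ) );   // one leaf per extension

   auto expected = merkle( std::move(ext_digests) );          // same call set_ext_merkle() makes

   if( b.header_extensions.empty() ) return false;
   const auto& slot0 = b.header_extensions[0];
   if( slot0.first != static_cast<uint16_t>(block_header_extensions_type::block_extensions_mroot) )
      return false;
   return slot0.second.size() == expected.data_size()
       && std::equal( slot0.second.begin(), slot0.second.end(), expected.data() );
}
```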
diff --git a/libraries/chain/block_header_state.cpp b/libraries/chain/block_header_state.cpp
index 6e7b339c42c..2ae15af7341 100644
--- a/libraries/chain/block_header_state.cpp
+++ b/libraries/chain/block_header_state.cpp
@@ -145,7 +145,7 @@ namespace eosio { namespace chain {
   */
  block_header_state block_header_state::next( const signed_block_header& h, bool trust )const {
    EOS_ASSERT( h.timestamp != block_timestamp_type(), block_validate_exception, "", ("h",h) );
-   EOS_ASSERT( h.header_extensions.size() == 0, block_validate_exception, "no supported extensions" );
+   //EOS_ASSERT( h.header_extensions.size() == 0, block_validate_exception, "no supported extensions" );

    EOS_ASSERT( h.timestamp > header.timestamp, block_validate_exception, "block must be later in time" );
    EOS_ASSERT( h.previous == id, unlinkable_block_exception, "block must link to current state" );
@@ -175,8 +175,10 @@ namespace eosio { namespace chain {
    result.header.action_mroot = h.action_mroot;
    result.header.transaction_mroot = h.transaction_mroot;
    result.header.producer_signature = h.producer_signature;
+   result.header.header_extensions = h.header_extensions;

    result.id = result.header.id();

+   // ASSUMPTION FROM controller_impl::apply_block = all untrusted blocks will have their signatures pre-validated here
    if( !trust ) {
      EOS_ASSERT( result.block_signing_key == result.signee(), wrong_signing_key, "block not signed by expected key",
diff --git a/libraries/chain/chain_config.cpp b/libraries/chain/chain_config.cpp
index 974675749fb..efb66bba95f 100644
--- a/libraries/chain/chain_config.cpp
+++ b/libraries/chain/chain_config.cpp
@@ -43,4 +43,10 @@ namespace eosio { namespace chain {
              "max authority depth should be at least 1" );
 }

+void chain_config2::validate() const{
+   EOS_ASSERT(std::numeric_limits<uint16_t>::max() > actor_blacklist.size(), action_validate_exception, "Overflow in blacklist when adding actor blacklist!");
+   EOS_ASSERT(std::numeric_limits<uint16_t>::max() > contract_blacklist.size(), action_validate_exception, "Overflow in blacklist when adding contract blacklist!");
+   EOS_ASSERT(std::numeric_limits<uint16_t>::max() > resource_greylist.size(), action_validate_exception, "Overflow in greylist when adding resource greylist!");
+}
+
 } } // namespace eosio::chain
diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index 41e9d551728..6bea61766be 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -33,6 +33,7 @@ using controller_index_set = index_set<
    account_index,
    account_sequence_index,
    global_property_multi_index,
+   global_property2_multi_index,
    dynamic_global_property_multi_index,
    block_summary_multi_index,
    transaction_multi_index,
@@ -108,6 +109,8 @@ struct pending_state {

    optional<block_id_type>                    _producer_block_id;

+   std::function<signature_type( const digest_type& )> _signer;
+
    void push() {
       _db_session.push();
    }
@@ -391,6 +394,7 @@ struct controller_impl {
          ilog( "database initialized with hash: ${hash}", ("hash", hash) );
       }

+
    }

    ~controller_impl() {
@@ -637,6 +641,19 @@
      });
      db.create<dynamic_global_property_object>([](auto&){});

+     // *bos begin*
+     // guaranteed minimum resources, abbreviated "gmr"
+     db.create<global_property2_object>([&](auto &gpo) {
+        gpo.gmr.cpu_us = config::default_gmr_cpu_limit;
+        gpo.gmr.net_byte = config::default_gmr_net_limit;
+        gpo.gmr.ram_byte = config::default_gmr_ram_limit;
+     });
+
+     sync_name_list(list_type::actor_blacklist_type, true);
+     sync_name_list(list_type::contract_blacklist_type, true);
+     sync_name_list(list_type::resource_greylist_type, true);
+     // *bos end*
+
      authorization.initialize_database();
      resource_limits.initialize_database();
@@ -662,7 +679,73 @@
                 conf.genesis.initial_timestamp );
   }

+  // "bos begin"
+  void set_name_list(list_type list, list_action_type action, std::vector<account_name> name_list)
+  {
+     int64_t lst = static_cast<int64_t>(list);
+
+     EOS_ASSERT(list >= list_type::actor_blacklist_type && list < list_type::list_type_count, transaction_exception, "unknown list type : ${l}, action: ${n}", ("l", static_cast<int64_t>(list))("n", static_cast<int64_t>(action)));
+     vector<flat_set<account_name> *> lists = {&conf.actor_blacklist, &conf.contract_blacklist, &conf.resource_greylist};
+     EOS_ASSERT(lists.size() == static_cast<int64_t>(list_type::list_type_count) - 1, transaction_exception, " list size wrong : ${l}, action: ${n}", ("l", static_cast<int64_t>(list))("n", static_cast<int64_t>(action)));
+
+     flat_set<account_name> &lo = *lists[lst - 1];
+
+     if (action == list_action_type::insert_type)
+     {
+        lo.insert(name_list.begin(), name_list.end());
+     }
+     else if (action == list_action_type::remove_type)
+     {
+        flat_set<account_name> name_set(name_list.begin(), name_list.end());
+
+        flat_set<account_name> results;
+        results.reserve(lo.size());
+        set_difference(lo.begin(), lo.end(),
+                       name_set.begin(), name_set.end(),
+                       std::inserter(results, results.begin()));
+
+        lo = results;
+     }
+
+     sync_name_list(list);
+  }
+
+  void sync_list_and_db(list_type list, global_property2_object &gprops2, bool isMerge = false)
+  {
+     int64_t lst = static_cast<int64_t>(list);
+     EOS_ASSERT( list >= list_type::actor_blacklist_type && list < list_type::list_type_count, transaction_exception, "unknown list type : ${l}, ismerge: ${n}", ("l", static_cast<int64_t>(list))("n", isMerge));
+     vector<shared_vector<account_name> *> lists = {&gprops2.cfg.actor_blacklist, &gprops2.cfg.contract_blacklist, &gprops2.cfg.resource_greylist};
+     vector<flat_set<account_name> *> conflists = {&conf.actor_blacklist, &conf.contract_blacklist, &conf.resource_greylist};
+     EOS_ASSERT(lists.size() == static_cast<int64_t>(list_type::list_type_count) - 1, transaction_exception, " list size wrong : ${l}, ismerge: ${n}", ("l", static_cast<int64_t>(list))("n", isMerge));
+     shared_vector<account_name> &lo = *lists[lst - 1];
+     flat_set<account_name> &clo = *conflists[lst - 1];
+
+     if (isMerge)
+     {
+        // on initialization, merge and deduplicate elements between the config list and the db; the result is kept in the config list
+        for (auto &a : lo)
+        {
+           clo.insert(a);
+        }
+     }
+
+     // clear the db copy and save the merged result back to the db object
+     lo.clear();
+     for (auto &a : clo)
+     {
+        lo.push_back(a);
+     }
+  }
+
+  void sync_name_list(list_type list, bool isMerge = false)
+  {
+     const auto &gpo2 = db.get<global_property2_object>();
+     db.modify(gpo2, [&](auto &gprops2) {
+        sync_list_and_db(list, gprops2, isMerge);
+     });
+  }
+  // "bos end"

   /**
    *  @post regardless of the success of commit block there is no active pending block
    */
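To make the flow above concrete: `set_name_list` mutates the in-memory `conf` lists and `sync_name_list` persists them into `global_property2_object`, so blacklists survive a restart. A hypothetical caller through the public API added by this patch (the enum values come from controller.hpp later in this diff; the account names are made up):

```cpp
// Hypothetical caller sketch: push two accounts onto the actor blacklist.
// controller::set_name_list takes raw int64_t values and casts them back
// to list_type / list_action_type internally.
#include <eosio/chain/controller.hpp>

using namespace eosio::chain;

void blacklist_actors( controller& chain ) {
   std::vector<account_name> offenders{ N(badactor1), N(badactor2) };   // example names
   chain.set_name_list( static_cast<int64_t>(list_type::actor_blacklist_type),
                        static_cast<int64_t>(list_action_type::insert_type),
                        offenders );
}
```

Note the remove path is built on `set_difference` over sorted `flat_set`s, so removal of names that are not present is a harmless no-op.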
@@ -1074,7 +1157,7 @@ struct controller_impl {

   void start_block( block_timestamp_type when, uint16_t confirm_block_count,
                     controller::block_status s,
-                    const optional<block_id_type>& producer_block_id )
+                    const optional<block_id_type>& producer_block_id, std::function<signature_type( const digest_type& )> signer = nullptr )
   {
      EOS_ASSERT( !pending, block_validate_exception, "pending block already exists" );

@@ -1093,6 +1176,7 @@

      pending->_block_status = s;
      pending->_producer_block_id = producer_block_id;
+     pending->_signer = signer;
      pending->_pending_block_state = std::make_shared<block_state>( *head, when ); // promotes pending schedule (if any) to active
      pending->_pending_block_state->in_current_chain = true;

@@ -1160,10 +1244,13 @@
   void apply_block( const signed_block_ptr& b, controller::block_status s ) { try {
      try {
-         EOS_ASSERT( b->block_extensions.size() == 0, block_validate_exception, "no supported extensions" );
+         //EOS_ASSERT( b->block_extensions.size() == 0, block_validate_exception, "no supported extensions" );
         auto producer_block_id = b->id();
         start_block( b->timestamp, b->confirmed, s , producer_block_id);

+         pending->_pending_block_state->block->header_extensions = b->header_extensions;
+         pending->_pending_block_state->block->block_extensions = b->block_extensions;
+
         transaction_trace_ptr trace;

         for( const auto& receipt : b->transactions ) {
@@ -1228,7 +1315,6 @@

   void push_block( const signed_block_ptr& b, controller::block_status s ) {
      EOS_ASSERT(!pending, block_validate_exception, "it is not valid to push a block when there is a pending block");
-
      auto reset_prod_light_validation = fc::make_scoped_exit([old_value=trusted_producer_light_validation, this]() {
         trusted_producer_light_validation = old_value;
      });
@@ -1361,6 +1447,17 @@
      pending->_pending_block_state->header.transaction_mroot = merkle( move(trx_digests) );
   }

+   void set_ext_merkle() {
+      vector<digest_type> ext_digests;
+      const auto& exts = pending->_pending_block_state->block->block_extensions;
+      ext_digests.reserve( exts.size());
+      for( const auto& a : exts )
+         ext_digests.emplace_back( digest_type::hash(a) );
+
+      auto mroot = merkle( move(ext_digests));
+      pending->_pending_block_state->header.set_block_extensions_mroot(mroot);
+   }
+

   void finalize_block()
   {
@@ -1385,16 +1482,24 @@
      // Update resource limits:
      resource_limits.process_account_limit_updates();
      const auto& chain_config = self.get_global_properties().configuration;
+     const auto& gmr = self.get_global_properties2().gmr; // guaranteed minimum resources, abbreviated "gmr"
+
      uint32_t max_virtual_mult = 1000;
      uint64_t CPU_TARGET = EOS_PERCENT(chain_config.max_block_cpu_usage, chain_config.target_block_cpu_usage_pct);
      resource_limits.set_block_parameters(
         { CPU_TARGET, chain_config.max_block_cpu_usage, config::block_cpu_usage_average_window_ms / config::block_interval_ms, max_virtual_mult, {99, 100}, {1000, 999}},
         {EOS_PERCENT(chain_config.max_block_net_usage, chain_config.target_block_net_usage_pct), chain_config.max_block_net_usage, config::block_size_average_window_ms / config::block_interval_ms, max_virtual_mult, {99, 100}, {1000, 999}}
      );
+
+     resource_limits.set_gmr_parameters(
+        { gmr.ram_byte, gmr.cpu_us, gmr.net_byte }
+     );
+
      resource_limits.process_block_usage(pending->_pending_block_state->block_num);

      set_action_merkle();
      set_trx_merkle();
+     set_ext_merkle();

      auto p = pending->_pending_block_state;
      p->id = p->header.id();
@@ -1605,9 +1710,9 @@
chainbase::database& controller::mutable_db()const { return my->db; }

const fork_database& controller::fork_db()const { return my->fork_db; }

-void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count) {
+void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count, std::function<signature_type( const digest_type& )> signer ) {
   validate_db_available_size();
-   my->start_block(when, confirm_block_count, block_status::incomplete, optional<block_id_type>() );
+   my->start_block(when, confirm_block_count, block_status::incomplete, optional<block_id_type>(), signer );
}

void controller::finalize_block() {
@@ -1677,12 +1782,21 @@ void controller::set_actor_whitelist( const flat_set<account_name>& new_actor_wh
 }
 void controller::set_actor_blacklist( const flat_set<account_name>& new_actor_blacklist ) {
    my->conf.actor_blacklist = new_actor_blacklist;
+
+   // *bos begin*
+   my->sync_name_list(list_type::actor_blacklist_type);
+   // *bos end*
 }
 void controller::set_contract_whitelist( const flat_set<account_name>& new_contract_whitelist ) {
    my->conf.contract_whitelist = new_contract_whitelist;
 }
 void controller::set_contract_blacklist( const flat_set<account_name>& new_contract_blacklist ) {
    my->conf.contract_blacklist = new_contract_blacklist;
+
+   // *bos begin*
+   my->sync_name_list(list_type::contract_blacklist_type);
+   // *bos end*
+
 }
 void controller::set_action_blacklist( const flat_set< pair<account_name, action_name> >& new_action_blacklist ) {
    for (auto& act: new_action_blacklist) {
@@ -1744,6 +1858,11 @@ optional<block_id_type> controller::pending_producer_block_id()const {
    return my->pending->_producer_block_id;
 }

+std::function<signature_type( const digest_type& )> controller::pending_producer_signer()const {
+   EOS_ASSERT( my->pending, block_validate_exception, "no pending block" );
+   return my->pending->_signer;
+}
+
 uint32_t controller::last_irreversible_block_num() const {
    return std::max(std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum), my->snapshot_head_block);
 }
@@ -2092,10 +2211,19 @@ void controller::set_subjective_cpu_leeway(fc::microseconds leeway) {

 void controller::add_resource_greylist(const account_name &name) {
    my->conf.resource_greylist.insert(name);
+
+   // *bos begin*
+   my->sync_name_list(list_type::resource_greylist_type);
+   // *bos end*
 }

 void controller::remove_resource_greylist(const account_name &name) {
    my->conf.resource_greylist.erase(name);
+
+   // *bos begin*
+   my->sync_name_list(list_type::resource_greylist_type);
+   // *bos end*
 }

 bool controller::is_resource_greylisted(const account_name &name) const {
@@ -2106,4 +2234,21 @@ const flat_set<account_name> &controller::get_resource_greylist() const {
    return my->conf.resource_greylist;
 }

+// *bos begin*
+const global_property2_object& controller::get_global_properties2()const {
+   return my->db.get<global_property2_object>();
+}
+
+void controller::set_name_list(int64_t list, int64_t action, std::vector<account_name> name_list)
+{
+   // redundant sync
+   my->sync_name_list(list_type::actor_blacklist_type, true);
+   my->sync_name_list(list_type::contract_blacklist_type, true);
+   my->sync_name_list(list_type::resource_greylist_type, true);
+
+   my->set_name_list(static_cast<list_type>(list), static_cast<list_action_type>(action), name_list);
+}
+// *bos end*
+
 } } /// eosio::chain
diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp
index c56ed0add05..317453f19b8 100644
--- a/libraries/chain/fork_database.cpp
+++ b/libraries/chain/fork_database.cpp
@@ -142,7 +142,6 @@ namespace eosio { namespace chain {
   block_state_ptr fork_database::add( signed_block_ptr b, bool trust ) {
      EOS_ASSERT( b, fork_database_exception, "attempt to add null block" );
      EOS_ASSERT( my->head, fork_db_block_not_found, "no head block set" );
-
      const auto& by_id_idx = my->index.get<by_block_id>();
      auto existing = by_id_idx.find( b->id() );
      EOS_ASSERT( existing == by_id_idx.end(), fork_database_exception, "we already know about this block" );
@@ -203,7 +202,6 @@
   /// remove all of the invalid forks built of this id including this id
   void fork_database::remove( const block_id_type& id ) {
      vector<block_id_type> remove_queue{id};
-
      for( uint32_t i = 0; i < remove_queue.size(); ++i ) {
         auto itr = my->index.find( remove_queue[i] );
         if( itr != my->index.end() )
diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp
index a253d950358..2b68d015442 100644
--- a/libraries/chain/include/eosio/chain/apply_context.hpp
+++ b/libraries/chain/include/eosio/chain/apply_context.hpp
@@ -594,6 +594,7 @@ class apply_context {
       bool                          privileged           = false;
       bool                          context_free         = false;
       bool                          used_context_free_api = false;
+      uint64_t                      global_action_sequence = 0;

       generic_index<index64_object>    idx64;
       generic_index<index128_object>   idx128;
diff --git a/libraries/chain/include/eosio/chain/block.hpp b/libraries/chain/include/eosio/chain/block.hpp
index 0e85b167df8..28c49f9772e 100644
--- a/libraries/chain/include/eosio/chain/block.hpp
+++ b/libraries/chain/include/eosio/chain/block.hpp
@@ -51,6 +51,9 @@ namespace eosio { namespace chain {
       }
    };

+   enum class block_extension_type : uint16_t {
+      bpsig_action_time_seed
+   };

   /**
    */
diff --git a/libraries/chain/include/eosio/chain/block_header.hpp b/libraries/chain/include/eosio/chain/block_header.hpp
index bf9cf0bedb8..723824b5310 100644
--- a/libraries/chain/include/eosio/chain/block_header.hpp
+++ b/libraries/chain/include/eosio/chain/block_header.hpp
@@ -4,6 +4,12 @@

 namespace eosio { namespace chain {

+   /* Categories of extended data carried in header_extensions.
+    */
+   enum block_header_extensions_type : uint16_t {
+      block_extensions_mroot = 0   // mroot of block extensions
+   };
+
   struct block_header
   {
      block_timestamp_type             timestamp;
@@ -32,13 +38,14 @@
       */
      uint32_t                          schedule_version = 0;
      optional<producer_schedule_type>  new_producers;
-     extensions_type                   header_extensions;
+     extensions_type                   header_extensions; // [0] : mroot of block extensions


      digest_type       digest()const;
      block_id_type     id() const;
      uint32_t          block_num() const { return num_from_id(previous) + 1; }
      static uint32_t   num_from_id(const block_id_type& id);
+     void              set_block_extensions_mroot(digest_type& mroot);
   };
diff --git a/libraries/chain/include/eosio/chain/chain_config.hpp b/libraries/chain/include/eosio/chain/chain_config.hpp
index 7f62ff111f9..907846d1a55 100644
--- a/libraries/chain/include/eosio/chain/chain_config.hpp
+++ b/libraries/chain/include/eosio/chain/chain_config.hpp
@@ -65,6 +65,24 @@ struct chain_config {
 bool operator==(const chain_config& a, const chain_config& b);
 inline bool operator!=(const chain_config& a, const chain_config& b) { return !(a == b); }

+// *bos*
+struct chain_config2 {
+   chain_config2( chainbase::allocator<char> alloc )
+   :actor_blacklist(alloc),contract_blacklist(alloc),resource_greylist(alloc){}
+
+   shared_vector<account_name> actor_blacklist;
+   shared_vector<account_name> contract_blacklist;
+   shared_vector<account_name> resource_greylist;
+
+   void validate()const;
+};
+
+// *bos*
+struct guaranteed_minimum_resources {
+   uint64_t ram_byte;
+   uint64_t cpu_us;
+   uint64_t net_byte;
+};
 } } // namespace eosio::chain

 FC_REFLECT(eosio::chain::chain_config,
@@ -79,3 +97,6 @@ FC_REFLECT(eosio::chain::chain_config,
            (max_inline_action_size)(max_inline_action_depth)(max_authority_depth)
 )
+// *bos*
+FC_REFLECT( eosio::chain::chain_config2, (actor_blacklist)(contract_blacklist)(resource_greylist) )
+FC_REFLECT( eosio::chain::guaranteed_minimum_resources, (ram_byte)(cpu_us)(net_byte) )
diff --git a/libraries/chain/include/eosio/chain/config.hpp b/libraries/chain/include/eosio/chain/config.hpp
index c0e9806319e..6aea7c3e3bd 100644
--- a/libraries/chain/include/eosio/chain/config.hpp
+++ b/libraries/chain/include/eosio/chain/config.hpp
@@ -5,6 +5,7 @@
 #pragma once
 #include <eosio/chain/wasm_interface.hpp>
 #include <fc/time.hpp>
+#include "config_xos.hpp"

 #pragma GCC diagnostic ignored "-Wunused-variable"
diff --git a/libraries/chain/include/eosio/chain/config_xos.hpp b/libraries/chain/include/eosio/chain/config_xos.hpp
new file mode 100644
index 00000000000..02adf0d54d0
--- /dev/null
+++ b/libraries/chain/include/eosio/chain/config_xos.hpp
@@ -0,0 +1,23 @@
+/**
+ *  @file
+ *  @copyright defined in eos/LICENSE.txt
+ */
+#pragma once
+#include <eosio/chain/wasm_interface.hpp>
+#include <fc/time.hpp>
+
+#pragma GCC diagnostic ignored "-Wunused-variable"
+
+namespace eosio { namespace chain { namespace config {
+
+// guaranteed minimum resources, abbreviated "gmr"
+const static uint32_t default_gmr_cpu_limit = 200'000;  /// free cpu usage in microseconds
+const static uint32_t default_gmr_net_limit = 10 * 1024;   // 10 KB
+const static uint32_t default_gmr_ram_limit = 0;           // 0 KB
+const static uint16_t default_gmr_resource_limit_per_day = 1000;
+
+
+
+} } } // namespace eosio::chain::config
+
+
diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp
index ec7b53fafc0..17ca27b3235 100644
--- a/libraries/chain/include/eosio/chain/controller.hpp
+++ b/libraries/chain/include/eosio/chain/controller.hpp
@@ -27,6 +27,7 @@ namespace eosio { namespace chain {

    class dynamic_global_property_object;
    class global_property_object;
+   class global_property2_object;   // *bos*
    class permission_object;
    class account_object;
    using resource_limits::resource_limits_manager;
@@ -45,6 +46,21 @@ namespace eosio { namespace chain {
          FULL,
          LIGHT
       };
+      // *bos begin*
+      enum class list_type : int64_t {
+         actor_blacklist_type = 1,
+         contract_blacklist_type,
+         resource_greylist_type,
+         list_type_count
+      };
+      enum class list_action_type : int64_t
+      {
+         insert_type = 1,
+         remove_type,
+         list_action_type_count
+      };
+
+      // *bos end*

       class controller {
          public:
@@ -95,7 +111,7 @@
             * Starts a new pending block session upon which new transactions can
             * be pushed.
             */
-           void start_block( block_timestamp_type time = block_timestamp_type(), uint16_t confirm_block_count = 0 );
+           void start_block( block_timestamp_type time = block_timestamp_type(), uint16_t confirm_block_count = 0, std::function<signature_type( const digest_type& )> signer = nullptr );

            void abort_block();
@@ -190,6 +206,7 @@
            optional<block_id_type>             pending_producer_block_id()const;

            const producer_schedule_type&       active_producers()const;
+           std::function<signature_type( const digest_type& )> pending_producer_signer()const;
            const producer_schedule_type&       pending_producers()const;
            optional<producer_schedule_type>    proposed_producers()const;
@@ -216,6 +233,15 @@
            void   add_resource_greylist(const account_name &name);
            void   remove_resource_greylist(const account_name &name);
+
+           // *bos begin*
+           const global_property2_object& get_global_properties2()const;   // *bos*
+           void set_name_list(int64_t list, int64_t action, std::vector<account_name> name_list);
+
+           // void list_add_name(const int list, const account_name &name);
+           // void list_remove_name(const int list, const account_name &name);
+           // *bos end*
+
            bool   is_resource_greylisted(const account_name &name) const;
            const  flat_set<account_name> &get_resource_greylist() const;
diff --git a/libraries/chain/include/eosio/chain/global_property_object.hpp b/libraries/chain/include/eosio/chain/global_property_object.hpp
index fe9ae85db10..6f4f35ffd80 100644
--- a/libraries/chain/include/eosio/chain/global_property_object.hpp
+++ b/libraries/chain/include/eosio/chain/global_property_object.hpp
@@ -34,6 +34,15 @@ namespace eosio { namespace chain {
       chain_config   configuration;
    };

+   // *bos*
+   class global_property2_object : public chainbase::object<global_property2_object_type, global_property2_object>
+   {
+      OBJECT_CTOR(global_property2_object, (cfg))
+
+      id_type                        id;
+      chain_config2                  cfg;
+      guaranteed_minimum_resources   gmr;   // guaranteed minimum resources
+   };


   /**
@@ -71,11 +80,22 @@
      >
   >;

+   // *bos*
+   using global_property2_multi_index = chainbase::shared_multi_index_container<
+      global_property2_object,
+      indexed_by<
+         ordered_unique<tag<by_id>,
+            BOOST_MULTI_INDEX_MEMBER(global_property2_object, global_property2_object::id_type, id)
+         >
+      >
+   >;

}}

 CHAINBASE_SET_INDEX_TYPE(eosio::chain::global_property_object, eosio::chain::global_property_multi_index)
 CHAINBASE_SET_INDEX_TYPE(eosio::chain::dynamic_global_property_object, eosio::chain::dynamic_global_property_multi_index)
+// *bos*
+CHAINBASE_SET_INDEX_TYPE(eosio::chain::global_property2_object, eosio::chain::global_property2_multi_index)

 FC_REFLECT(eosio::chain::dynamic_global_property_object,
            (global_action_sequence)
@@ -84,3 +104,7 @@ FC_REFLECT(eosio::chain::global_property_object,
            (proposed_schedule_block_num)(proposed_schedule)(configuration)
           )
+// *bos*
+FC_REFLECT(eosio::chain::global_property2_object,
+            (cfg)(gmr)
+          )
\ No newline at end of file
diff --git a/libraries/chain/include/eosio/chain/resource_limits.hpp b/libraries/chain/include/eosio/chain/resource_limits.hpp
index 4b0c58beeb0..616deb3f2a0 100644
--- a/libraries/chain/include/eosio/chain/resource_limits.hpp
+++ b/libraries/chain/include/eosio/chain/resource_limits.hpp
@@ -35,6 +35,16 @@ namespace eosio { namespace chain { namespace resource_limits {
      int64_t max = 0; ///< max per window under current congestion
   };

+
+   struct gmr_parameters {
+      uint64_t ram_byte;
+      uint64_t cpu_us;
+      uint64_t net_byte;
+
+      void validate()const; // throws if the parameters do not satisfy basic sanity checks
+   };
+
+
   class resource_limits_manager {
      public:
         explicit resource_limits_manager(chainbase::database& db)
@@ -50,6 +60,14 @@
         void initialize_account( const account_name& account );
         void set_block_parameters( const elastic_limit_parameters& cpu_limit_parameters, const elastic_limit_parameters& net_limit_parameters );
+
+        /**
+         * @brief Set the guaranteed minimum resources parameters object
+         *
+         * @param res_parameters guaranteed minimum resource parameters (ram, cpu, net)
+         */
+        void set_gmr_parameters( const gmr_parameters& res_parameters ); // *bos* guaranteed minimum resources, abbreviated "gmr"
+
         void update_account_usage( const flat_set<account_name>& accounts, uint32_t ordinal );
         void add_transaction_usage( const flat_set<account_name>& accounts, uint64_t cpu_usage, uint64_t net_usage, uint32_t ordinal );
@@ -58,7 +76,7 @@
         /// set_account_limits returns true if new ram_bytes limit is more restrictive than the previously set one
         bool set_account_limits( const account_name& account, int64_t ram_bytes, int64_t net_weight, int64_t cpu_weight);
-        void get_account_limits( const account_name& account, int64_t& ram_bytes, int64_t& net_weight, int64_t& cpu_weight) const;
+        void get_account_limits( const account_name& account, int64_t& ram_bytes, int64_t& net_weight, int64_t& cpu_weight, bool raw = false) const; // *bos* add raw

         void process_account_limit_updates();
         void process_block_usage( uint32_t block_num );
@@ -86,3 +104,5 @@
 FC_REFLECT( eosio::chain::resource_limits::account_resource_limit, (used)(available)(max) )
 FC_REFLECT( eosio::chain::resource_limits::ratio, (numerator)(denominator))
 FC_REFLECT( eosio::chain::resource_limits::elastic_limit_parameters, (target)(max)(periods)(max_multiplier)(contract_rate)(expand_rate))
+
+FC_REFLECT( eosio::chain::resource_limits::gmr_parameters, (ram_byte)(cpu_us)(net_byte))
diff --git a/libraries/chain/include/eosio/chain/resource_limits_private.hpp b/libraries/chain/include/eosio/chain/resource_limits_private.hpp
index 687a56a4d90..877c77e5acb 100644
--- a/libraries/chain/include/eosio/chain/resource_limits_private.hpp
+++ b/libraries/chain/include/eosio/chain/resource_limits_private.hpp
@@ -203,6 +203,24 @@ namespace eosio { namespace chain { namespace resource_limits {
      >
   >;

+   class gmr_config_object : public chainbase::object<gmr_config_object_type, gmr_config_object> {
+      OBJECT_CTOR(gmr_config_object);
+      id_type id;
+
+
+      gmr_parameters res_parameters = { config::default_gmr_ram_limit, config::default_gmr_cpu_limit, config::default_gmr_net_limit };
+
+   };
+
+   using gmr_config_index = chainbase::shared_multi_index_container<
+      gmr_config_object,
+      indexed_by<
+         ordered_unique<tag<by_id>, member<gmr_config_object, gmr_config_object::id_type, &gmr_config_object::id>>
+      >
+   >;
+
+
+
   class resource_limits_state_object : public chainbase::object<resource_limits_state_object_type, resource_limits_state_object> {
      OBJECT_CTOR(resource_limits_state_object);
      id_type id;
@@ -265,9 +283,14 @@
 CHAINBASE_SET_INDEX_TYPE(eosio::chain::resource_limits::resource_usage_object, eosio::chain::resource_limits::resource_usage_index)
 CHAINBASE_SET_INDEX_TYPE(eosio::chain::resource_limits::resource_limits_config_object, eosio::chain::resource_limits::resource_limits_config_index)
 CHAINBASE_SET_INDEX_TYPE(eosio::chain::resource_limits::resource_limits_state_object, eosio::chain::resource_limits::resource_limits_state_index)

+CHAINBASE_SET_INDEX_TYPE(eosio::chain::resource_limits::gmr_config_object, eosio::chain::resource_limits::gmr_config_index)
+
 FC_REFLECT(eosio::chain::resource_limits::usage_accumulator, (last_ordinal)(value_ex)(consumed))
 FC_REFLECT(eosio::chain::resource_limits::resource_limits_object, (owner)(net_weight)(cpu_weight)(ram_bytes))
 FC_REFLECT(eosio::chain::resource_limits::resource_usage_object, (owner)(net_usage)(cpu_usage)(ram_usage))
 FC_REFLECT(eosio::chain::resource_limits::resource_limits_config_object, (cpu_limit_parameters)(net_limit_parameters)(account_cpu_usage_average_window)(account_net_usage_average_window))
-FC_REFLECT(eosio::chain::resource_limits::resource_limits_state_object, (average_block_net_usage)(average_block_cpu_usage)(pending_net_usage)(pending_cpu_usage)(total_net_weight)(total_cpu_weight)(total_ram_bytes)(virtual_net_limit)(virtual_cpu_limit))
\ No newline at end of file
+FC_REFLECT(eosio::chain::resource_limits::resource_limits_state_object, (average_block_net_usage)(average_block_cpu_usage)(pending_net_usage)(pending_cpu_usage)(total_net_weight)(total_cpu_weight)(total_ram_bytes)(virtual_net_limit)(virtual_cpu_limit))
+
+
+FC_REFLECT(eosio::chain::resource_limits::gmr_config_object, (res_parameters))
FC_REFLECT(eosio::chain::resource_limits::resource_limits_object, (owner)(net_weight)(cpu_weight)(ram_bytes)) FC_REFLECT(eosio::chain::resource_limits::resource_usage_object, (owner)(net_usage)(cpu_usage)(ram_usage)) FC_REFLECT(eosio::chain::resource_limits::resource_limits_config_object, (cpu_limit_parameters)(net_limit_parameters)(account_cpu_usage_average_window)(account_net_usage_average_window)) -FC_REFLECT(eosio::chain::resource_limits::resource_limits_state_object, (average_block_net_usage)(average_block_cpu_usage)(pending_net_usage)(pending_cpu_usage)(total_net_weight)(total_cpu_weight)(total_ram_bytes)(virtual_net_limit)(virtual_cpu_limit)) \ No newline at end of file +FC_REFLECT(eosio::chain::resource_limits::resource_limits_state_object, (average_block_net_usage)(average_block_cpu_usage)(pending_net_usage)(pending_cpu_usage)(total_net_weight)(total_cpu_weight)(total_ram_bytes)(virtual_net_limit)(virtual_cpu_limit)) + + +FC_REFLECT(eosio::chain::resource_limits::gmr_config_object, (res_parameters)) diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp index 4610f24c891..028b050a595 100644 --- a/libraries/chain/include/eosio/chain/types.hpp +++ b/libraries/chain/include/eosio/chain/types.hpp @@ -148,6 +148,7 @@ namespace eosio { namespace chain { index_double_object_type, index_long_double_object_type, global_property_object_type, + global_property2_object_type, dynamic_global_property_object_type, block_summary_object_type, transaction_object_type, @@ -169,6 +170,7 @@ namespace eosio { namespace chain { resource_usage_object_type, resource_limits_state_object_type, resource_limits_config_object_type, + gmr_config_object_type, ///< Defined by bos account_history_object_type, ///< Defined by history_plugin action_history_object_type, ///< Defined by history_plugin reversible_block_object_type, diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp index fa38f76a1e2..21865ac8676 100644 --- a/libraries/chain/resource_limits.cpp +++ b/libraries/chain/resource_limits.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -13,7 +14,8 @@ using resource_index_set = index_set< resource_limits_index, resource_usage_index, resource_limits_state_index, - resource_limits_config_index + resource_limits_config_index, + gmr_config_index >; static_assert( config::rate_limiting_precision > 0, "config::rate_limiting_precision must be positive" ); @@ -36,6 +38,13 @@ void elastic_limit_parameters::validate()const { EOS_ASSERT( expand_rate.denominator > 0, resource_limit_exception, "elastic limit parameter 'expand_rate' is not a well-defined ratio" ); } +void gmr_parameters::validate()const { + + EOS_ASSERT( cpu_us > 0, resource_limit_exception, "guaranteed minimum resources parameter 'cpu_us' cannot be zero" ); + EOS_ASSERT( net_byte > 0, resource_limit_exception, "guaranteed minimum resources parameter 'net_byte' cannot be zero" ); + EOS_ASSERT( ram_byte >= 0, resource_limit_exception, "guaranteed minimum resources parameter 'ram_byte' cannot be less than zero" ); +} + void resource_limits_state_object::update_virtual_cpu_limit( const resource_limits_config_object& cfg ) { //idump((average_block_cpu_usage.average())); @@ -56,6 +65,10 @@ void resource_limits_manager::initialize_database() { // see default settings in the declaration }); + const auto& gmr_config = _db.create([](gmr_config_object& config){ + // see default settings in the declaration + }); +
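+ // Hedged aside, not part of this patch: with the gmr floor in place, the per-account
+ // cap computed in add_transaction_usage below becomes
+ //   max_user_use_in_window = (capacity_in_window * user_weight) / all_user_weight + gmr.cpu_us
+ // for CPU (and analogously + gmr.net_byte for NET), so even an account with zero weight
+ // keeps gmr.cpu_us microseconds of CPU and gmr.net_byte bytes of NET per averaging window.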
_db.create([&config](resource_limits_state_object& state){ // see default settings in the declaration @@ -108,6 +121,15 @@ void resource_limits_manager::set_block_parameters(const elastic_limit_parameter }); } +//guaranteed minimum resources which is abbreviated gmr +void resource_limits_manager::set_gmr_parameters(const gmr_parameters& res_parameters) { + res_parameters.validate(); + const auto& config = _db.get(); + _db.modify(config, [&](gmr_config_object& c){ + c.res_parameters = res_parameters; + }); +} + void resource_limits_manager::update_account_usage(const flat_set& accounts, uint32_t time_slot ) { const auto& config = _db.get(); for( const auto& a : accounts ) { @@ -123,6 +145,10 @@ void resource_limits_manager::add_transaction_usage(const flat_set const auto& state = _db.get(); const auto& config = _db.get(); + + //guaranteed minimum resources which is abbreviated gmr + const auto& gmr = _db.get().res_parameters; // *bos* + for( const auto& a : accounts ) { const auto& usage = _db.get( a ); @@ -144,7 +170,7 @@ void resource_limits_manager::add_transaction_usage(const flat_set uint128_t user_weight = (uint128_t)cpu_weight; uint128_t all_user_weight = state.total_cpu_weight; - auto max_user_use_in_window = (virtual_network_capacity_in_window * user_weight) / all_user_weight; + auto max_user_use_in_window = (virtual_network_capacity_in_window * user_weight) / all_user_weight+gmr.cpu_us; EOS_ASSERT( cpu_used_in_window <= max_user_use_in_window, tx_cpu_usage_exceeded, @@ -163,7 +189,7 @@ void resource_limits_manager::add_transaction_usage(const flat_set uint128_t user_weight = (uint128_t)net_weight; uint128_t all_user_weight = state.total_net_weight; - auto max_user_use_in_window = (virtual_network_capacity_in_window * user_weight) / all_user_weight; + auto max_user_use_in_window = (virtual_network_capacity_in_window * user_weight) / all_user_weight+gmr.net_byte; EOS_ASSERT( net_used_in_window <= max_user_use_in_window, tx_net_usage_exceeded, @@ -269,8 +295,9 @@ bool resource_limits_manager::set_account_limits( const account_name& account, i return decreased_limit; } -void resource_limits_manager::get_account_limits( const account_name& account, int64_t& ram_bytes, int64_t& net_weight, int64_t& cpu_weight ) const { +void resource_limits_manager::get_account_limits( const account_name& account, int64_t& ram_bytes, int64_t& net_weight, int64_t& cpu_weight, bool raw ) const { const auto* pending_buo = _db.find( boost::make_tuple(true, account) ); + const auto& gmr = _db.get().res_parameters; // *bos* if (pending_buo) { ram_bytes = pending_buo->ram_bytes; net_weight = pending_buo->net_weight; @@ -281,6 +308,13 @@ void resource_limits_manager::get_account_limits( const account_name& account, i net_weight = buo.net_weight; cpu_weight = buo.cpu_weight; } + + // *bos* + const int64_t ONEKM = 1024; + if (!raw && ram_bytes >= ONEKM) + { + ram_bytes += gmr.ram_byte; + } } @@ -373,6 +407,7 @@ account_resource_limit resource_limits_manager::get_account_cpu_limit_ex( const const auto& state = _db.get(); const auto& usage = _db.get(name); const auto& config = _db.get(); + const auto& gmr = _db.get().res_parameters; // *bos* int64_t cpu_weight, x, y; get_account_limits( name, x, y, cpu_weight ); @@ -389,7 +424,7 @@ account_resource_limit resource_limits_manager::get_account_cpu_limit_ex( const uint128_t user_weight = (uint128_t)cpu_weight; uint128_t all_user_weight = (uint128_t)state.total_cpu_weight; - auto max_user_use_in_window = (virtual_cpu_capacity_in_window * user_weight) / all_user_weight; 
+ auto max_user_use_in_window = (virtual_cpu_capacity_in_window * user_weight) / all_user_weight + gmr.cpu_us; auto cpu_used_in_window = impl::integer_divide_ceil((uint128_t)usage.cpu_usage.value_ex * window_size, (uint128_t)config::rate_limiting_precision); if( max_user_use_in_window <= cpu_used_in_window ) @@ -411,6 +446,7 @@ account_resource_limit resource_limits_manager::get_account_net_limit_ex( const const auto& config = _db.get(); const auto& state = _db.get(); const auto& usage = _db.get(name); + const auto& gmr = _db.get().res_parameters; // *bos* int64_t net_weight, x, y; get_account_limits( name, x, net_weight, y ); @@ -428,7 +464,7 @@ account_resource_limit resource_limits_manager::get_account_net_limit_ex( const uint128_t all_user_weight = (uint128_t)state.total_net_weight; - auto max_user_use_in_window = (virtual_network_capacity_in_window * user_weight) / all_user_weight; + auto max_user_use_in_window = (virtual_network_capacity_in_window * user_weight) / all_user_weight + gmr.net_byte; auto net_used_in_window = impl::integer_divide_ceil((uint128_t)usage.net_usage.value_ex * window_size, (uint128_t)config::rate_limiting_precision); if( max_user_use_in_window <= net_used_in_window ) diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index f03fb71f445..f63d836d377 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -148,7 +148,7 @@ class privileged_api : public context_aware_api { } void get_resource_limits( account_name account, int64_t& ram_bytes, int64_t& net_weight, int64_t& cpu_weight ) { - context.control.get_resource_limits_manager().get_account_limits( account, ram_bytes, net_weight, cpu_weight); + context.control.get_resource_limits_manager().get_account_limits( account, ram_bytes, net_weight, cpu_weight, true); // *bos* add raw=true } int64_t set_proposed_producers( array_ptr packed_producer_schedule, size_t datalen) { @@ -192,6 +192,42 @@ class privileged_api : public context_aware_api { }); } + // *bos begin* + void set_name_list_packed(int64_t list, int64_t action, array_ptr packed_name_list, size_t datalen) + { + int64_t lstbegin = static_cast(list_type::actor_blacklist_type ); + int64_t lstend = static_cast(list_type::list_type_count); + int64_t actbegin = static_cast(list_action_type::insert_type); + int64_t actend = static_cast(list_action_type::list_action_type_count); + EOS_ASSERT(list >= lstbegin && list < lstend, wasm_execution_error, "unknown name list!"); + EOS_ASSERT(action >= actbegin && action < actend, wasm_execution_error, "unknown action"); + + datastream ds(packed_name_list, datalen); + std::vector name_list; // TODO std::set doesn't work, bug.
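+ // Hedged sketch, not part of this patch: the bytes arriving in `packed_name_list` are
+ // expected to be an fc::raw-packed vector of account names, so a privileged caller
+ // would produce them roughly like this (the account name is illustrative):
+ //
+ //   std::vector<eosio::chain::account_name> names{ eosio::chain::account_name("badactor") };
+ //   std::vector<char> packed = fc::raw::pack(names); // varint count followed by uint64 names
+ //   set_name_list_packed(list, action, packed.data(), packed.size());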
+ fc::raw::unpack(ds, name_list); + + context.control.set_name_list(list, action, name_list); + + + } + + void set_guaranteed_minimum_resources(int64_t ram_byte, int64_t cpu_us, int64_t net_byte) + { + EOS_ASSERT(ram_byte >= 0 && ram_byte <= 100 * 1024, wasm_execution_error, "resources minimum guarantee for ram limit expected [0, 102400]"); + EOS_ASSERT(cpu_us >= 0 && cpu_us <= 100 * 1000, wasm_execution_error, "resources minimum guarantee for cpu limit expected [0, 100000]"); + EOS_ASSERT(net_byte >= 0 && net_byte <= 100 * 1024, wasm_execution_error, "resources minimum guarantee for net limit expected [0, 102400]"); + + //guaranteed minimum resources which is abbreviated gmr + context.db.modify(context.control.get_global_properties2(), + [&](auto &gprops2) { + gprops2.gmr.ram_byte = ram_byte; + gprops2.gmr.cpu_us = cpu_us; + gprops2.gmr.net_byte = net_byte; + }); + } + + // *bos end* + bool is_privileged( account_name n )const { return context.db.get( n ).privileged; } @@ -1362,6 +1398,14 @@ class context_free_transaction_api : public context_aware_api { return context.get_packed_transaction().size(); } + void get_transaction_id( fc::sha256& id ) { + id = context.trx_context.id; + } + + void get_action_sequence(uint64_t& seq){ + seq = context.global_action_sequence; + } + int expiration() { return context.trx_context.trx.expiration.sec_since_epoch(); } @@ -1644,6 +1688,81 @@ class call_depth_api : public context_aware_api { } }; + +class action_seed_api : public context_aware_api { +public: + action_seed_api(apply_context& ctx) + : context_aware_api(ctx) {} + + int bpsig_action_time_seed(array_ptr sig, size_t siglen) { + auto data = action_timestamp(); + fc::sha256::encoder encoder; + encoder.write(reinterpret_cast(data.data()), data.size()* sizeof(uint32_t)); + auto digest = encoder.result(); + optional signature; + auto block_state = context.control.pending_block_state(); + for (auto& extension: block_state->block->block_extensions) { + if (extension.first != static_cast(block_extension_type::bpsig_action_time_seed)) continue; + EOS_ASSERT(extension.second.size() > 8, transaction_exception, "invalid producer signature in block extensions"); + uint64_t* act_parts = reinterpret_cast(extension.second.data()); + if ( act_parts[0] != context.global_action_sequence) continue; + + auto sig_data = extension.second.data() + 8; + auto sig_size = extension.second.size() - 8; + signature.emplace(); + datastream ds(sig_data, sig_size); + fc::raw::unpack(ds, *signature); + auto check = fc::crypto::public_key(*signature, digest, false); + EOS_ASSERT( check == block_state->block_signing_key, transaction_exception, "recovered key does not match the expected block signing key" ); + break; + } + bool sign = false; + if (context.control.is_producing_block() && !signature) { + auto signer = context.control.pending_producer_signer(); + if (signer) { + // Producer is producing this block + signature = signer(digest); + sign = true; + } else { + // Non-producer is speculating this block, so it skips signing + // TODO: speculating result will be different from producing result + signature.emplace(); + } + } + EOS_ASSERT(!!signature, transaction_exception, "empty sig action seed"); + auto& s = *signature; + auto sig_size = fc::raw::pack_size(s); + if (siglen == 0) return sig_size; + if (sig_size <= siglen) { + datastream ds(sig, sig_size); + fc::raw::pack(ds, s); + if (sign) { + block_state->block->block_extensions.emplace_back(); + char* act_parts = reinterpret_cast(&context.global_action_sequence); + auto &extension =
block_state->block->block_extensions.back(); + extension.first = static_cast(block_extension_type::bpsig_action_time_seed); + extension.second.resize(8 + sig_size); + std::copy(act_parts, act_parts + 8, extension.second.data()); + std::copy((char*)sig, (char*)sig + sig_size, extension.second.data() + 8); + } + return sig_size; + } + return 0; + } +private: + vector action_timestamp() { + auto current = context.control.pending_block_time().time_since_epoch().count(); + current -= current % (config::block_interval_us); + + uint32_t* current_halves = reinterpret_cast(¤t); + uint32_t* act_parts = reinterpret_cast(&context.global_action_sequence); + return vector{act_parts[0],act_parts[1], current_halves[0], current_halves[1]}; + } +}; +REGISTER_INTRINSICS(action_seed_api, +(bpsig_action_time_seed, int(int, int) ) +); + REGISTER_INJECTED_INTRINSICS(call_depth_api, (call_depth_assert, void() ) ); @@ -1702,6 +1821,8 @@ REGISTER_INTRINSICS(privileged_api, (set_proposed_producers, int64_t(int,int) ) (get_blockchain_parameters_packed, int(int, int) ) (set_blockchain_parameters_packed, void(int,int) ) + (set_name_list_packed, void(int64_t,int64_t,int,int) ) + (set_guaranteed_minimum_resources, void(int64_t,int64_t,int64_t) ) (is_privileged, int(int64_t) ) (set_privileged, void(int64_t, int) ) ); @@ -1823,6 +1944,8 @@ REGISTER_INTRINSICS(console_api, REGISTER_INTRINSICS(context_free_transaction_api, (read_transaction, int(int, int) ) (transaction_size, int() ) + (get_transaction_id, void(int) ) + (get_action_sequence, void(int) ) (expiration, int() ) (tapos_block_prefix, int() ) (tapos_block_num, int() ) diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 9a4f4094330..22e52407953 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -140,7 +140,7 @@ namespace eosio { namespace testing { return traces; } - void push_genesis_block(); + void push_genesis_block(); vector get_producer_keys( const vector& producer_names )const; transaction_trace_ptr set_producers(const vector& producer_names); diff --git a/plugins/CMakeLists.txt b/plugins/CMakeLists.txt index 9b0b17b9d0a..9d4a6ab93b3 100644 --- a/plugins/CMakeLists.txt +++ b/plugins/CMakeLists.txt @@ -19,6 +19,8 @@ add_subdirectory(mongo_db_plugin) add_subdirectory(login_plugin) add_subdirectory(test_control_plugin) add_subdirectory(test_control_api_plugin) +add_subdirectory(kafka_plugin) +add_subdirectory(notify_plugin) # Forward variables to top level so packaging picks them up set(CPACK_DEBIAN_PACKAGE_DEPENDS ${CPACK_DEBIAN_PACKAGE_DEPENDS} PARENT_SCOPE) diff --git a/plugins/history_api_plugin/history_api_plugin.cpp b/plugins/history_api_plugin/history_api_plugin.cpp index bd78dede086..286e97f6e54 100644 --- a/plugins/history_api_plugin/history_api_plugin.cpp +++ b/plugins/history_api_plugin/history_api_plugin.cpp @@ -32,17 +32,15 @@ void history_api_plugin::plugin_initialize(const variables_map&) {} }} #define CHAIN_RO_CALL(call_name) CALL(history, ro_api, history_apis::read_only, call_name) -//#define CHAIN_RW_CALL(call_name) CALL(history, rw_api, history_apis::read_write, call_name) void history_api_plugin::plugin_startup() { ilog( "starting history_api_plugin" ); auto ro_api = app().get_plugin().get_read_only_api(); - //auto rw_api = app().get_plugin().get_read_write_api(); app().get_plugin().add_api({ -// CHAIN_RO_CALL(get_transaction), CHAIN_RO_CALL(get_actions), CHAIN_RO_CALL(get_transaction), + 
CHAIN_RO_CALL(get_block_detail), CHAIN_RO_CALL(get_key_accounts), CHAIN_RO_CALL(get_controlled_accounts) }); diff --git a/plugins/history_plugin/history_plugin.cpp b/plugins/history_plugin/history_plugin.cpp index 8245215b061..f3838726726 100644 --- a/plugins/history_plugin/history_plugin.cpp +++ b/plugins/history_plugin/history_plugin.cpp @@ -372,8 +372,9 @@ namespace eosio { namespace history_apis { + read_only::get_actions_result read_only::get_actions( const read_only::get_actions_params& params )const { - edump((params)); + edump((params)); auto& chain = history->chain_plug->chain(); const auto& db = chain.db(); const auto abi_serializer_max_time = history->chain_plug->get_abi_serializer_max_time(); @@ -564,6 +565,94 @@ namespace eosio { return result; } + fc::variant read_only::get_block_detail(const read_only::get_block_detail_params& params) const { + static char const TRANSACTIONS[] = "transactions"; + static char const TRX[] = "trx"; + static char const ID[] = "id"; + static char const TRACES[] = "traces"; + + auto & plugin = history->chain_plug; + auto & chain = plugin->chain(); + + auto get_object_value = [](fc::variant const& src, char const * key) -> fc::variant const & { + static auto const null_variant = fc::variant(); + + if ( !src.is_object() ) + return null_variant; + + auto & obj = src.get_object(); + auto const & itr = obj.find(key); + if ( itr == obj.end() ) + return null_variant; + + return itr->value(); + }; + + auto get_tx_array = [&get_object_value](fc::variant const& block) -> fc::variants const & { + static auto const null_variants = fc::variants(); + + auto & value = get_object_value(block, TRANSACTIONS); + if ( !value.is_array() ) + return null_variants; + + return value.get_array(); + }; + + auto get_tx_id = [&get_object_value](fc::variant const& tx) -> optional { + auto & id = get_object_value(get_object_value(tx, TRX), ID); + if ( !id.is_string() ) + return fc::optional(); + + return fc::optional(id.get_string()); + }; + + auto const & src = plugin->get_read_only_api().get_block( + chain_apis::read_only::get_block_params { + /*block_num_or_id = */ params.block_num_or_id + } + ); + + auto & rhs = get_tx_array(src); + if ( rhs.empty() ) + return src; + + auto lhs = fc::variants(); + lhs.reserve(rhs.size()); + + auto & database = chain.db(); + auto & index = database.get_index(); + auto const abi_serializer_max_time = plugin->get_abi_serializer_max_time(); + for ( auto const & tx : rhs ) { + auto maybe_id = get_tx_id(tx); + if ( maybe_id ) { + auto id = *maybe_id; + auto itr = index.lower_bound(boost::make_tuple(id)); + auto traces = fc::variants(); + + while ( itr != index.end() && itr->trx_id == id ) { + + fc::datastream ds( itr->packed_action_trace.data(), itr->packed_action_trace.size() ); + action_trace t; + fc::raw::unpack( ds, t ); + traces.emplace_back( chain.to_variant_with_abi(t, abi_serializer_max_time) ); + + ++itr; + } + + if ( !traces.empty() ) { + auto new_trx = fc::mutable_variant_object(tx[TRX])(TRACES, traces); + auto new_tx = fc::mutable_variant_object(tx).set(TRX, move(new_trx)); + lhs.emplace_back(move(new_tx)); + continue; + } + } + + lhs.emplace_back(tx); + } + + return fc::mutable_variant_object(src).set(TRANSACTIONS, move(lhs)); + } + read_only::get_key_accounts_results read_only::get_key_accounts(const get_key_accounts_params& params) const { std::set accounts; const auto& db = history->chain_plug->chain().db(); @@ -586,6 +675,4 @@ namespace eosio { } /// history_apis - - } /// namespace eosio diff --git 
a/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp b/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp index b6801b30a29..838f9b24662 100644 --- a/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp +++ b/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp @@ -50,7 +50,6 @@ class read_only { optional time_limit_exceeded_error; }; - get_actions_result get_actions( const get_actions_params& )const; @@ -71,8 +70,6 @@ class read_only { get_transaction_result get_transaction( const get_transaction_params& )const; - - /* struct ordered_transaction_results { uint32_t seq_num; @@ -83,6 +80,12 @@ class read_only { get_transactions_results get_transactions(const get_transactions_params& params) const; */ + struct get_block_detail_params { + string block_num_or_id; + }; + + fc::variant get_block_detail(const get_block_detail_params& params) const; + struct get_key_accounts_params { chain::public_key_type public_key; @@ -150,7 +153,10 @@ FC_REFLECT(eosio::history_apis::read_only::get_transactions_params, (account_nam FC_REFLECT(eosio::history_apis::read_only::ordered_transaction_results, (seq_num)(transaction_id)(transaction) ) FC_REFLECT(eosio::history_apis::read_only::get_transactions_results, (transactions)(time_limit_exceeded_error) ) */ -FC_REFLECT(eosio::history_apis::read_only::get_key_accounts_params, (public_key) ) -FC_REFLECT(eosio::history_apis::read_only::get_key_accounts_results, (account_names) ) -FC_REFLECT(eosio::history_apis::read_only::get_controlled_accounts_params, (controlling_account) ) -FC_REFLECT(eosio::history_apis::read_only::get_controlled_accounts_results, (controlled_accounts) ) + +FC_REFLECT( eosio::history_apis::read_only::get_block_detail_params, (block_num_or_id) ) + +FC_REFLECT( eosio::history_apis::read_only::get_key_accounts_params, (public_key) ) +FC_REFLECT( eosio::history_apis::read_only::get_key_accounts_results, (account_names) ) +FC_REFLECT( eosio::history_apis::read_only::get_controlled_accounts_params, (controlling_account) ) +FC_REFLECT( eosio::history_apis::read_only::get_controlled_accounts_results, (controlled_accounts) ) diff --git a/plugins/kafka_plugin/CMakeLists.txt b/plugins/kafka_plugin/CMakeLists.txt new file mode 100644 index 00000000000..62f6127148f --- /dev/null +++ b/plugins/kafka_plugin/CMakeLists.txt @@ -0,0 +1,10 @@ +file(GLOB HEADERS "*.hpp") +add_library(kafka_plugin + kafka_plugin.cpp kafka.cpp try_handle.cpp + ${HEADERS}) + +find_package(Cppkafka) +find_package(RdKafka) + +target_include_directories(kafka_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ${CPPKAFKA_INCLUDE_DIR}) +target_link_libraries(kafka_plugin chain_plugin appbase ${CPPKAFKA_LIBRARY} RdKafka::rdkafka) diff --git a/plugins/kafka_plugin/fifo.h b/plugins/kafka_plugin/fifo.h new file mode 100644 index 00000000000..c65dbe2bf81 --- /dev/null +++ b/plugins/kafka_plugin/fifo.h @@ -0,0 +1,85 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace kafka { + +constexpr std::size_t FIFO_MAX_PUSH_SIZE = 1024; +constexpr std::size_t FIFO_MAX_POP_SIZE = 1024; + +template +class fifo : public boost::noncopyable { +public: + fifo(std::size_t max_push_size = FIFO_MAX_PUSH_SIZE, std::size_t max_pop_size = FIFO_MAX_POP_SIZE); + void push(const T& element); + std::vector pop(); + bool empty(); + void awaken(); + +private: + std::mutex mux_; + std::condition_variable not_empty_cv_; + std::condition_variable not_full_cv_; + bool non_blocking_{}; + std::deque deque_; + 
std::size_t max_push_size_; + std::size_t max_pop_size_; +}; + +template +fifo::fifo(std::size_t max_push_size, std::size_t max_pop_size) { + max_push_size_ = max_push_size; + max_pop_size_ = max_pop_size; +} + +template +void fifo::push(const T& element) { + std::unique_lock lock(mux_); + + if (deque_.size() >= max_push_size_) { + not_full_cv_.wait(lock, [&] { + return non_blocking_ || deque_.size() < max_push_size_; + }); + } + + deque_.push_back(element); + not_empty_cv_.notify_one(); +} + +template +std::vector fifo::pop() { + std::unique_lock lock(mux_); + if (deque_.empty()) { + not_empty_cv_.wait(lock, [&] { + return non_blocking_ || !deque_.empty(); + }); + } + + std::vector result; + for (std::size_t i = 0; i < max_pop_size_ && !deque_.empty(); ++i) { + result.push_back(std::move(deque_.front())); + deque_.pop_front(); + } + not_full_cv_.notify_all(); + return result; +} + +template +bool fifo::empty() { + std::unique_lock lock(mux_); + return deque_.empty(); +} + +template +void fifo::awaken() { + non_blocking_ = true; + not_empty_cv_.notify_all(); + not_full_cv_.notify_all(); +} + +} diff --git a/plugins/kafka_plugin/kafka.cpp b/plugins/kafka_plugin/kafka.cpp new file mode 100644 index 00000000000..7eee52b8386 --- /dev/null +++ b/plugins/kafka_plugin/kafka.cpp @@ -0,0 +1,185 @@ +#include "kafka.hpp" + +#include + +#include "try_handle.hpp" + +namespace std { +template<> struct hash { + typedef kafka::bytes argument_type; + typedef size_t result_type; + result_type operator()(argument_type const& s) const noexcept { + return std::hash{}(string(s.begin(), s.end())); + } +}; +} + +namespace kafka { + +using chain::account_name; +using chain::action_name; +using chain::block_id_type; +using chain::permission_name; +using chain::transaction; +using chain::signed_transaction; +using chain::signed_block; +using chain::transaction_id_type; + +namespace { + +inline bytes checksum_bytes(const fc::sha256& s) { return bytes(s.data(), s.data() + sizeof(fc::sha256)); } + +TransactionStatus transactionStatus(fc::enum_type status) { + if (status == chain::transaction_receipt::executed) return TransactionStatus::executed; + else if (status == chain::transaction_receipt::soft_fail) return TransactionStatus::soft_fail; + else if (status == chain::transaction_receipt::hard_fail) return TransactionStatus::hard_fail; + else if (status == chain::transaction_receipt::delayed) return TransactionStatus::delayed; + else if (status == chain::transaction_receipt::expired) return TransactionStatus::expired; + else return TransactionStatus::unknown; +} + +} + +void kafka::set_config(Configuration config) { + config_ = config; +} + +void kafka::set_topics(const string& block_topic, const string& tx_topic, const string& tx_trace_topic, const string& action_topic) { + block_topic_ = block_topic; + tx_topic_ = tx_topic; + tx_trace_topic_ = tx_trace_topic; + action_topic_ = action_topic; +} + +void kafka::set_partition(int partition) { + partition_ = partition; +} + +void kafka::start() { + producer_ = std::make_unique(config_); + + auto conf = producer_->get_configuration().get_all(); + ilog("Kafka config: ${conf}", ("conf", conf)); +} + +void kafka::stop() { + producer_->flush(); + + producer_.reset(); +} + +void kafka::push_block(const chain::block_state_ptr& block_state, bool irreversible) { + const auto& header = block_state->header; + auto b = std::make_shared(); + + b->id = checksum_bytes(block_state->id); + b->num = block_state->block_num; + b->timestamp = header.timestamp; + + b->lib = irreversible; + + 
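+ // Hedged note, not part of this patch: the Block record itself is serialized to JSON in
+ // consume_block below, while the `block` field assigned next carries the fc::raw-packed
+ // signed_block. A consumer could recover the full block roughly like this (illustrative):
+ //
+ //   auto var = fc::json::from_string(kafka_message_value);
+ //   kafka::Block blk; fc::from_variant(var, blk);
+ //   auto sb = fc::raw::unpack<eosio::chain::signed_block>(blk.block);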
b->block = fc::raw::pack(*block_state->block); + b->tx_count = static_cast(block_state->block->transactions.size()); + + uint16_t seq{}; + for (const auto& tx_receipt: block_state->block->transactions) { + auto count = push_transaction(tx_receipt, b, seq++); + b->action_count += count.first; + b->context_free_action_count += count.second; + } + + consume_block(b); +} + +std::pair kafka::push_transaction(const chain::transaction_receipt& tx_receipt, const BlockPtr& block, uint16_t block_seq) { + auto t = std::make_shared(); + if(tx_receipt.trx.contains()) { + t->id = checksum_bytes(tx_receipt.trx.get()); + } else { + auto signed_tx = tx_receipt.trx.get().get_signed_transaction(); + t->id = checksum_bytes(signed_tx.id()); + t->action_count = static_cast(signed_tx.actions.size()); + t->context_free_action_count = static_cast(signed_tx.context_free_actions.size()); + } + t->block_id = block->id; + t->block_num = block->num; + t->block_time = block->timestamp; + t->block_seq = block_seq; + + consume_transaction(t); + + return {t->action_count, t->context_free_action_count}; +} + +void kafka::push_transaction_trace(const chain::transaction_trace_ptr& tx_trace) { + auto t = std::make_shared(); + + t->id = checksum_bytes(tx_trace->id); + t->block_num = tx_trace->block_num; + t->scheduled = tx_trace->scheduled; + if (tx_trace->receipt) { + t->status = transactionStatus(tx_trace->receipt->status); + t->cpu_usage_us = tx_trace->receipt->cpu_usage_us; + t->net_usage_words = tx_trace->receipt->net_usage_words; + } + if (tx_trace->except) { + t->exception = tx_trace->except->to_string(); + } + + consume_transaction_trace(t); + + for (auto& action_trace: tx_trace->action_traces) { + push_action(action_trace, 0, t); // 0 means no parent + } +} + +void kafka::push_action(const chain::action_trace& action_trace, uint64_t parent_seq, const TransactionTracePtr& tx) { + auto a = std::make_shared(); + + a->global_seq = action_trace.receipt.global_sequence; + a->recv_seq = action_trace.receipt.recv_sequence; + a->parent_seq = parent_seq; + a->account = action_trace.act.account; + a->name = action_trace.act.name; + if (not action_trace.act.authorization.empty()) a->auth = fc::raw::pack(action_trace.act.authorization); + a->data = action_trace.act.data; + a->receiver = action_trace.receipt.receiver; + if (not action_trace.receipt.auth_sequence.empty()) a->auth_seq = fc::raw::pack(action_trace.receipt.auth_sequence); + a->code_seq = action_trace.receipt.code_sequence; + a->abi_seq = action_trace.receipt.abi_sequence; + a->block_num = action_trace.block_num; + a->tx_id = checksum_bytes(action_trace.trx_id); + if (not action_trace.console.empty()) a->console = action_trace.console; + + consume_action(a); + + for (auto& inline_trace: action_trace.inline_traces) { + push_action(inline_trace, action_trace.receipt.global_sequence, tx); + } +} + +void kafka::consume_block(BlockPtr block) { + auto payload = fc::json::to_string(*block, fc::json::legacy_generator); + Buffer buffer (block->id.data(), block->id.size()); + producer_->produce(MessageBuilder(block_topic_).partition(partition_).key(buffer).payload(payload)); +} + +void kafka::consume_transaction(TransactionPtr tx) { + auto payload = fc::json::to_string(*tx, fc::json::legacy_generator); + Buffer buffer (tx->id.data(), tx->id.size()); + producer_->produce(MessageBuilder(tx_topic_).partition(partition_).key(buffer).payload(payload)); +} + +void kafka::consume_transaction_trace(TransactionTracePtr tx_trace) { + auto payload = fc::json::to_string(*tx_trace, 
fc::json::legacy_generator); + Buffer buffer (tx_trace->id.data(), tx_trace->id.size()); + producer_->produce(MessageBuilder(tx_trace_topic_).partition(partition_).key(buffer).payload(payload)); +} + +void kafka::consume_action(ActionPtr action) { + auto payload = fc::json::to_string(*action, fc::json::legacy_generator); + Buffer buffer((char*)&action->global_seq, sizeof(action->global_seq)); + producer_->produce(MessageBuilder(action_topic_).partition(partition_).key(buffer).payload(payload)); +} + +} diff --git a/plugins/kafka_plugin/kafka.hpp b/plugins/kafka_plugin/kafka.hpp new file mode 100644 index 00000000000..5242ee872e7 --- /dev/null +++ b/plugins/kafka_plugin/kafka.hpp @@ -0,0 +1,46 @@ +#pragma once + +#include +#include + +#include + +#include "types.hpp" + +namespace kafka { + +using namespace std; +using namespace cppkafka; +using namespace eosio; + +class kafka { +public: + void set_config(Configuration config); + void set_topics(const string& block_topic, const string& tx_topic, const string& tx_trace_topic, const string& action_topic); + void set_partition(int partition); + void start(); + void stop(); + + void push_block(const chain::block_state_ptr& block_state, bool irreversible); + std::pair push_transaction(const chain::transaction_receipt& transaction_receipt, const BlockPtr& block, uint16_t block_seq); + void push_transaction_trace(const chain::transaction_trace_ptr& transaction_trace); + void push_action(const chain::action_trace& action_trace, uint64_t parent_seq, const TransactionTracePtr& tx); + +private: + void consume_block(BlockPtr block); + void consume_transaction(TransactionPtr tx); + void consume_transaction_trace(TransactionTracePtr tx_trace); + void consume_action(ActionPtr action); + + Configuration config_; + string block_topic_; + string tx_topic_; + string tx_trace_topic_; + string action_topic_; + + int partition_{-1}; + + std::unique_ptr producer_; +}; + +} diff --git a/plugins/kafka_plugin/kafka_plugin.cpp b/plugins/kafka_plugin/kafka_plugin.cpp new file mode 100644 index 00000000000..901fd57a29e --- /dev/null +++ b/plugins/kafka_plugin/kafka_plugin.cpp @@ -0,0 +1,166 @@ +#include "kafka_plugin.hpp" + +#include + +#include "kafka.hpp" +#include "try_handle.hpp" + +namespace eosio { + +using namespace std; + +namespace bpo = boost::program_options; +using bpo::options_description; +using bpo::variables_map; + +using kafka::handle; + +enum class compression_codec { + none, + gzip, + snappy, + lz4 +}; + +std::istream& operator>>(std::istream& in, compression_codec& codec) { + std::string s; + in >> s; + if (s == "none") codec = compression_codec::none; + else if (s == "gzip") codec = compression_codec::gzip; + else if (s == "snappy") codec = compression_codec::snappy; + else if (s == "lz4") codec = compression_codec::lz4; + else in.setstate(std::ios_base::failbit); + return in; +} + +static appbase::abstract_plugin& _kafka_relay_plugin = app().register_plugin(); + +kafka_plugin::kafka_plugin() : kafka_(std::make_unique()) {} +kafka_plugin::~kafka_plugin() {} + +void kafka_plugin::set_program_options(options_description&, options_description& cfg) { + cfg.add_options() + ("kafka-enable", bpo::value(), "Kafka enable") + ("kafka-broker-list", bpo::value()->default_value("127.0.0.1:9092"), "Kafka initial broker list, formatted as comma separated pairs of host or host:port, e.g., host1:port1,host2:port2") + ("kafka-block-topic", bpo::value()->default_value("eos.blocks"), "Kafka topic for message `block`") + ("kafka-transaction-topic", 
bpo::value()->default_value("eos.txs"), "Kafka topic for message `transaction`") + ("kafka-transaction-trace-topic", bpo::value()->default_value("eos.txtraces"), "Kafka topic for message `transaction_trace`") + ("kafka-action-topic", bpo::value()->default_value("eos.actions"), "Kafka topic for message `action`") + ("kafka-batch-num-messages", bpo::value()->default_value(1024), "Kafka minimum number of messages to wait for to accumulate in the local queue before sending off a message set") + ("kafka-queue-buffering-max-ms", bpo::value()->default_value(500), "Kafka how long to wait for kafka-batch-num-messages to fill up in the local queue") + ("kafka-compression-codec", bpo::value()->value_name("none/gzip/snappy/lz4"), "Kafka compression codec to use for compressing message sets, default is snappy") + ("kafka-request-required-acks", bpo::value()->default_value(1), "Kafka indicates how many acknowledgements the leader broker must receive from ISR brokers before responding to the request: 0=Broker does not send any response/ack to client, 1=Only the leader broker will need to ack the message, -1=broker will block until message is committed by all in sync replicas (ISRs) or broker's min.insync.replicas setting before sending response") + ("kafka-message-send-max-retries", bpo::value()->default_value(2), "Kafka how many times to retry sending a failing MessageSet") + ("kafka-start-block-num", bpo::value()->default_value(1), "Kafka starts syncing from which block number") + ("kafka-statistics-interval-ms", bpo::value()->default_value(0), "Kafka statistics emit interval, maximum is 86400000, 0 disables statistics") + ("kafka-fixed-partition", bpo::value()->default_value(-1), "Kafka specify fixed partition for all topics, -1 disables specify") + ; + // TODO: security options +} + +void kafka_plugin::plugin_initialize(const variables_map& options) { + if (not options.count("kafka-enable") || not options.at("kafka-enable").as()) { + wlog("kafka_plugin disabled, since no --kafka-enable=true specified"); + return; + } + + ilog("Initialize kafka plugin"); + configured_ = true; + + string compressionCodec = "snappy"; + if (options.count("kafka-compression-codec")) { + switch (options.at("kafka-compression-codec").as()) { + case compression_codec::none: + compressionCodec = "none"; + break; + case compression_codec::gzip: + compressionCodec = "gzip"; + break; + case compression_codec::snappy: + compressionCodec = "snappy"; + break; + case compression_codec::lz4: + compressionCodec = "lz4"; + break; + } + } + + kafka::Configuration config = { + {"metadata.broker.list", options.at("kafka-broker-list").as()}, + {"batch.num.messages", options.at("kafka-batch-num-messages").as()}, + {"queue.buffering.max.ms", options.at("kafka-queue-buffering-max-ms").as()}, + {"compression.codec", compressionCodec}, + {"request.required.acks", options.at("kafka-request-required-acks").as()}, + {"message.send.max.retries", options.at("kafka-message-send-max-retries").as()}, + {"socket.keepalive.enable", true} + }; + auto stats_interval = options.at("kafka-statistics-interval-ms").as(); + if (stats_interval > 0) { + config.set("statistics.interval.ms", stats_interval); + config.set_stats_callback([](kafka::KafkaHandleBase& handle, const std::string& json) { + ilog("kafka stats: ${json}", ("json", json)); + }); + } + kafka_->set_config(config); + kafka_->set_topics( + options.at("kafka-block-topic").as(), + options.at("kafka-transaction-topic").as(), + options.at("kafka-transaction-trace-topic").as(), + 
options.at("kafka-action-topic").as() + ); + + if (options.at("kafka-fixed-partition").as() >= 0) { + kafka_->set_partition(options.at("kafka-fixed-partition").as()); + } + + unsigned start_block_num = options.at("kafka-start-block-num").as(); + + // add callback to chain_controller config + chain_plugin_ = app().find_plugin(); + auto& chain = chain_plugin_->chain(); + + block_conn_ = chain.accepted_block.connect([=](const chain::block_state_ptr& b) { + if (not start_sync_) { + if (b->block_num >= start_block_num) start_sync_ = true; + else return; + } + handle([=] { kafka_->push_block(b, false); }, "push block"); + }); + irreversible_block_conn_ = chain.irreversible_block.connect([=](const chain::block_state_ptr& b) { + if (not start_sync_) { + if (b->block_num >= start_block_num) start_sync_ = true; + else return; + } + handle([=] { kafka_->push_block(b, true); }, "push irreversible block"); + }); + transaction_conn_ = chain.applied_transaction.connect([=](const chain::transaction_trace_ptr& t) { + if (not start_sync_) return; + handle([=] { kafka_->push_transaction_trace(t); }, "push transaction"); + }); +} + +void kafka_plugin::plugin_startup() { + if (not configured_) return; + ilog("Starting kafka_plugin"); + kafka_->start(); + ilog("Started kafka_plugin"); +} + +void kafka_plugin::plugin_shutdown() { + if (not configured_) return; + ilog("Stopping kafka_plugin"); + + try { + block_conn_.disconnect(); + irreversible_block_conn_.disconnect(); + transaction_conn_.disconnect(); + + kafka_->stop(); + } catch (const std::exception& e) { + elog("Exception on kafka_plugin shutdown: ${e}", ("e", e.what())); + } + + ilog("Stopped kafka_plugin"); +} + +} diff --git a/plugins/kafka_plugin/kafka_plugin.hpp b/plugins/kafka_plugin/kafka_plugin.hpp new file mode 100644 index 00000000000..0bc66108ad2 --- /dev/null +++ b/plugins/kafka_plugin/kafka_plugin.hpp @@ -0,0 +1,41 @@ +#pragma once + +#include +#include + +namespace kafka { +class kafka; // forward declaration +} + +namespace eosio { + +using namespace appbase; + +class kafka_plugin : public appbase::plugin { +public: + APPBASE_PLUGIN_REQUIRES((kafka_plugin)) + + kafka_plugin(); + virtual ~kafka_plugin(); + + void set_program_options(options_description&, options_description& cfg) override; + + void plugin_initialize(const variables_map& options); + void plugin_startup(); + void plugin_shutdown(); + +private: + bool configured_{}; + + chain_plugin* chain_plugin_{}; + + boost::signals2::connection block_conn_; + boost::signals2::connection irreversible_block_conn_; + boost::signals2::connection transaction_conn_; + + std::atomic start_sync_{false}; + + std::unique_ptr kafka_; +}; + +} diff --git a/plugins/kafka_plugin/try_handle.cpp b/plugins/kafka_plugin/try_handle.cpp new file mode 100644 index 00000000000..e9be213b652 --- /dev/null +++ b/plugins/kafka_plugin/try_handle.cpp @@ -0,0 +1,17 @@ +#include "try_handle.hpp" + +namespace kafka { + +void handle(std::function handler, const std::string& desc) { + try { + handler(); + } catch (fc::exception& e) { + elog("FC Exception while ${desc}: ${e}", ("e", e.to_string())("desc", desc)); + } catch (std::exception& e) { + elog("STD Exception while ${desc}: ${e}", ("e", e.what())("desc", desc)); + } catch (...) 
{ + elog("Unknown exception while ${desc}", ("desc", desc)); + } +} + +} diff --git a/plugins/kafka_plugin/try_handle.hpp b/plugins/kafka_plugin/try_handle.hpp new file mode 100644 index 00000000000..7d059e34dcc --- /dev/null +++ b/plugins/kafka_plugin/try_handle.hpp @@ -0,0 +1,9 @@ +#pragma once + +#include + +namespace kafka { + +void handle(std::function handler, const std::string& desc); + +} diff --git a/plugins/kafka_plugin/types.hpp b/plugins/kafka_plugin/types.hpp new file mode 100644 index 00000000000..a419f4b8c2b --- /dev/null +++ b/plugins/kafka_plugin/types.hpp @@ -0,0 +1,93 @@ +#pragma once + +#include + +namespace kafka { + +using name_t = uint64_t; +using std::string; +using bytes = std::vector; +using eosio::chain::block_timestamp_type; + +struct Block { + bytes id; + unsigned num; + + block_timestamp_type timestamp; + + bool lib; // whether irreversible + + bytes block; + + uint32_t tx_count{}; + uint32_t action_count{}; + uint32_t context_free_action_count{}; +}; + +struct Transaction { + bytes id; + + bytes block_id; + uint32_t block_num; + block_timestamp_type block_time; + + uint16_t block_seq; // the sequence number of this transaction in its block + + uint32_t action_count{}; + uint32_t context_free_action_count{}; +}; + +enum TransactionStatus { + executed, soft_fail, hard_fail, delayed, expired, unknown +}; + +struct TransactionTrace { // new ones will override old ones, typically when status is changed + bytes id; + + uint32_t block_num; + + bool scheduled; + + TransactionStatus status; + unsigned net_usage_words; + uint32_t cpu_usage_us; + + string exception; +}; + +struct Action { + uint64_t global_seq; // the global sequence number of this action + uint64_t recv_seq; // the sequence number of this action for this receiver + + uint64_t parent_seq; // parent action trace global sequence number, only for inline traces + + name_t account; // account name + name_t name; // action name + bytes auth; // binary serialization of authorization array of permission_level + bytes data; // payload + + name_t receiver; // where this action is executed on; may not be equal with `account_`, such as from notification + + bytes auth_seq; + unsigned code_seq; + unsigned abi_seq; + + uint32_t block_num; + bytes tx_id; // the transaction that generated this action + + string console; +}; + +using BlockPtr = std::shared_ptr; +using TransactionPtr = std::shared_ptr; +using TransactionTracePtr = std::shared_ptr; +using ActionPtr = std::shared_ptr; + +} + +FC_REFLECT_ENUM(kafka::TransactionStatus, (executed)(soft_fail)(hard_fail)(delayed)(expired)(unknown)) + +FC_REFLECT(kafka::Block, (id)(num)(timestamp)(lib)(block)(tx_count)(action_count)(context_free_action_count)) +FC_REFLECT(kafka::Transaction, (id)(block_id)(block_num)(block_time)(block_seq)(action_count)(context_free_action_count)) +FC_REFLECT(kafka::TransactionTrace, (id)(block_num)(scheduled)(status)(net_usage_words)(cpu_usage_us)(exception)) +FC_REFLECT(kafka::Action, (global_seq)(recv_seq)(parent_seq)(account)(name)(auth)(data)(receiver)(auth_seq)(code_seq)(abi_seq)(block_num)(tx_id)(console)) diff --git a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp index a736a9ff464..bbdd357eec7 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp @@ -132,6 +132,14 @@ namespace eosio { uint32_t end_block; }; +struct request_p2p_message{ + bool discoverable; + }; + + struct 
response_p2p_message{ + bool discoverable; + string p2p_peer_list; + }; using net_message = static_variant; + packed_transaction, + response_p2p_message, + request_p2p_message>; } // namespace eosio @@ -159,7 +169,8 @@ FC_REFLECT( eosio::time_message, (org)(rec)(xmt)(dst) ) FC_REFLECT( eosio::notice_message, (known_trx)(known_blocks) ) FC_REFLECT( eosio::request_message, (req_trx)(req_blocks) ) FC_REFLECT( eosio::sync_request_message, (start_block)(end_block) ) - +FC_REFLECT( eosio::request_p2p_message, (discoverable) ) +FC_REFLECT( eosio::response_p2p_message, (discoverable)(p2p_peer_list) ) /** * Goals of Network Code diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 9ee380b666f..8283c5c9320 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -118,6 +118,13 @@ namespace eosio { > node_transaction_index; + struct p2p_peer_record{ + string peer_address; + time_point_sec expiry; + bool is_config; + bool discoverable; + bool connected; + }; class net_plugin_impl { public: unique_ptr acceptor; @@ -128,6 +135,9 @@ namespace eosio { uint32_t num_clients = 0; vector supplied_peers; + map p2p_peer_records; + bool p2p_discoverable; + bool request_p2p_flag=true; vector allowed_peers; ///< peer keys allowed to connect std::map private_keys; ///< overlapping with producer keys, also authenticating non-producing nodes @@ -197,6 +207,8 @@ namespace eosio { bool is_valid( const handshake_message &msg); + void send_p2p_request(connection_ptr c); + void handle_message( connection_ptr c, const handshake_message &msg); void handle_message( connection_ptr c, const chain_size_message &msg); void handle_message( connection_ptr c, const go_away_message &msg ); @@ -220,6 +232,8 @@ namespace eosio { void handle_message( connection_ptr c, const sync_request_message &msg); void handle_message( connection_ptr c, const signed_block &msg); void handle_message( connection_ptr c, const packed_transaction &msg); + void handle_message( connection_ptr c, const request_p2p_message &msg); + void handle_message( connection_ptr c, const response_p2p_message &msg); void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection); void start_txn_timer( ); @@ -575,6 +589,8 @@ namespace eosio { std::function callback); void do_queue_write(); + void send_p2p_request(bool discoverable); + void send_p2p_response(bool discoverable,string p2p_peer_list); /** \brief Process the next message from the pending message buffer * * Process the next message from the pending_message_buffer. @@ -942,6 +958,29 @@ namespace eosio { } + void connection::send_p2p_request(bool discoverable) + { + try + { + enqueue(net_message(request_p2p_message{discoverable})); + } + catch (...) + { + elog("send request_p2p_message message error"); + } + } + void connection::send_p2p_response(bool discoverable, string p2p_list) + { + try + { + enqueue(net_message(response_p2p_message{discoverable, p2p_list})); + } + catch (...) 
+ { + elog("send response_p2p_message message error"); + } + } + void connection::stop_send() { syncing = false; } @@ -1952,7 +1991,8 @@ namespace eosio { if( !err && c->socket->is_open() ) { if (start_session( c )) { c->send_handshake (); - } + send_p2p_request(c); + } } else { if( endpoint_itr != tcp::resolver::iterator() ) { close(c); @@ -1968,6 +2008,37 @@ namespace eosio { } ); } + void net_plugin_impl::send_p2p_request(connection_ptr c) + { + if (p2p_discoverable && request_p2p_flag) + { + auto peer_record = p2p_peer_records.find(c->peer_addr); + if (peer_record != p2p_peer_records.end()) + { + if (peer_record->second.is_config && !peer_record->second.connected) + { + c->send_p2p_request(p2p_discoverable); + peer_record->second.connected = true; + } + else + { + bool stop_flag = true; + for (auto record : p2p_peer_records) + { + if (record.second.is_config && !( record.second.connected||record.second.expiry < time_point::now())) + { + stop_flag = false; + break; + } + } + if (stop_flag) + { + request_p2p_flag = false; + } + } + } + } + } bool net_plugin_impl::start_session( connection_ptr con ) { boost::asio::ip::tcp::no_delay nodelay( true ); boost::system::error_code ec; @@ -2217,6 +2288,56 @@ namespace eosio { } + void net_plugin_impl::handle_message( connection_ptr c, const request_p2p_message &msg){ + peer_ilog(c, "received request_p2p_message"); + string rspm; + for(auto sd :p2p_peer_records){ + if(sd.second.discoverable){ + rspm.append(sd.second.peer_address+"#"); + } + } + if(p2p_discoverable||rspm.size()>0){ + c->send_p2p_response(p2p_discoverable,rspm); + } + } + + void net_plugin_impl::handle_message( connection_ptr c, const response_p2p_message &msg){ + peer_ilog(c, "received response_p2p_message"); + auto peer_record=p2p_peer_records.find(c->peer_addr); + if(peer_record!=p2p_peer_records.end()){ + peer_record->second.discoverable=msg.discoverable; + if (peer_record->second.is_config&&msg.p2p_peer_list.length()>0){ + + vector p2p_peer_list; + int start = 0; + string delim="#"; + int idx = msg.p2p_peer_list.find(delim, start); + string peer_list; + while( idx != std::string::npos ) + { + if(max_nodes_per_host<=connections.size()||max_nodes_per_host<=p2p_peer_records.size()){ + return; + } + peer_list=msg.p2p_peer_list.substr(start, idx-start); + if(peer_list.size()<3){ + break; + } + start = idx+delim.size(); + idx = msg.p2p_peer_list.find(delim, start); + if( find_connection( peer_list )) + continue; + p2p_peer_record p2prcd; + p2prcd.peer_address=peer_list; + p2prcd.discoverable=false; + p2prcd.is_config=true; + p2prcd.connected=false; + p2p_peer_records.insert(pair(peer_list,p2prcd)); + connection_ptr c = std::make_shared(peer_list); + fc_dlog(logger,"adding new connection to the list"); + connections.insert( c ); + }}} + } + void net_plugin_impl::handle_message( connection_ptr c, const handshake_message &msg) { peer_ilog(c, "received handshake_message"); if (!is_valid(msg)) { @@ -2879,6 +3000,8 @@ namespace eosio { ( "sync-fetch-span", bpo::value()->default_value(def_sync_fetch_span), "number of blocks to retrieve in a chunk from any individual peer during synchronization") ( "max-implicit-request", bpo::value()->default_value(def_max_just_send), "maximum sizes of transaction or block messages that are sent without first sending a notice") ( "use-socket-read-watermark", bpo::value()->default_value(false), "Enable experimental socket read watermark optimization") + ( "p2p-discoverable", bpo::value()->default_value(false), + "True to enable p2p discovery, making this node discoverable by its peers.") ( 
"peer-log-format", bpo::value()->default_value( "[\"${_name}\" ${_ip}:${_port}]" ), "The string used to format peers when logging messages about them. Variables are escaped with ${}.\n" "Available Variables:\n" @@ -2919,6 +3042,8 @@ namespace eosio { my->use_socket_read_watermark = options.at( "use-socket-read-watermark" ).as(); + my->p2p_discoverable=options.at( "p2p-discoverable" ).as(); + my->resolver = std::make_shared( std::ref( app().get_io_service())); if( options.count( "p2p-listen-endpoint" )) { my->p2p_address = options.at( "p2p-listen-endpoint" ).as(); @@ -3037,6 +3162,14 @@ namespace eosio { my->start_monitors(); for( auto seed_node : my->supplied_peers ) { + p2p_peer_record p2prcd; + p2prcd.peer_address=seed_node; + p2prcd.discoverable=false; + p2prcd.is_config=true; + p2prcd.connected=false; + p2prcd.expiry=time_point_sec((time_point::now()).sec_since_epoch()+10); + my->p2p_peer_records.insert(pair(seed_node,p2prcd)); + connect( seed_node ); } diff --git a/plugins/notify_plugin/CMakeLists.txt b/plugins/notify_plugin/CMakeLists.txt new file mode 100644 index 00000000000..ea3d85eea9a --- /dev/null +++ b/plugins/notify_plugin/CMakeLists.txt @@ -0,0 +1,7 @@ +file(GLOB HEADERS "include/eosio/notify_plugin/*.hpp") +add_library( notify_plugin + notify_plugin.cpp + ${HEADERS} include/eosio/notify_plugin/notify_plugin.hpp) + +target_link_libraries( notify_plugin chain_plugin eosio_chain appbase fc ) +target_include_directories( notify_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) \ No newline at end of file diff --git a/plugins/notify_plugin/README.md b/plugins/notify_plugin/README.md new file mode 100644 index 00000000000..35080a4962d --- /dev/null +++ b/plugins/notify_plugin/README.md @@ -0,0 +1,73 @@ +# notify_plugin + +Send real time actions on chain to a `receive_url`, which you can use to do some notifications. + +### Usage + +Add some configs to your `config.ini` just as follows: + +``` +## Notify Plugin +plugin = eosio::notify_plugin +# notify-filter-on = account:action +notify-filter-on = b1: +notify-filter-on = noprom:transfer +notify-filter-on = eosio:delegatebw +# http endpoint for each action seen on the chain. +notify-receive-url = http://127.0.0.1:8080/notify +# Age limit in seconds for blocks to send notifications. No age limit if set to negative. +# Used to prevent old actions from trigger HTTP request while on replay (seconds) +notify-age-limit = -1 +# Retry times of sending http notification if failed. 
+notify-retry-times = 3 +``` + +You can then receive on-chain actions by watching your server endpoint (`http://127.0.0.1:8080/notify`); the data posted to it looks like: + +```json +{ + "irreversible": true, + "actions": [{ + "tx_id": "b31885bada6c2d5e71b1302e87d4006c59ff2a40a12108559d76142548d8cf79", + "account": "eosio.token", + "name": "transfer", + "seq_num": 1, + "receiver": "noprom", + "block_time": "2018-09-29T11:51:06.000", + "block_num": 127225, + "authorization": [{ + "actor": "noprom", + "permission": "active" + } + ], + "action_data": { + "from": "noprom", + "to": "noprom1", + "quantity": "0.0001 EOS", + "memo": "Transfer from noprom to xiaoming" + } + },{ + "tx_id": "b31885bada6c2d5e71b1302e87d4006c59ff2a40a12108559d76142548d8cf79", + "account": "eosio.token", + "name": "transfer", + "seq_num": 2, + "receiver": "noprom1", + "block_time": "2018-09-29T11:51:06.000", + "block_num": 127225, + "authorization": [{ + "actor": "noprom", + "permission": "active" + } + ], + "action_data": { + "from": "noprom", + "to": "noprom1", + "quantity": "0.0001 EOS", + "memo": "Transfer from noprom to xiaoming" + } + } + ] +} +``` + +On the server side, you can use these actions for many purposes, such as building a Telegram alert bot that users subscribe to in order to receive their account's on-chain activity. \ No newline at end of file diff --git a/plugins/notify_plugin/include/eosio/notify_plugin/http_async_client.hpp b/plugins/notify_plugin/include/eosio/notify_plugin/http_async_client.hpp new file mode 100644 index 00000000000..cfc8f38a294 --- /dev/null +++ b/plugins/notify_plugin/include/eosio/notify_plugin/http_async_client.hpp @@ -0,0 +1,104 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace eosio +{ +using namespace fc; +namespace asio = boost::asio; + +template +struct final_action +{ + final_action(F f) : clean{f} {} + ~final_action() { clean(); } + +private: + F clean; +}; + +template +final_action finally(F f) +{ + return final_action(f); +} + +class http_async_client +{ +public: + http_async_client() : sync_client(std::make_unique()), + work_guard(asio::make_work_guard(ioc)) {} + + ~http_async_client() + { + work_guard.reset(); + } + + void start() + { + worker = std::make_unique([this]() { + ioc.run(); + }); + } + + void stop() + { + work_guard.reset(); + worker->join(); + } + + void set_default_retry_times(int64_t t) { + default_retry_times = t; + } + + template + void post(const url &dest, const T &payload, const time_point &deadline = time_point::maximum()) + { + asio::post(ioc.get_executor(), [this, dest, payload, deadline]() { + post_sync(dest, payload, deadline); + }); + } + +private: + template + void post_sync(const url &dest, const T &payload, + const time_point &deadline = time_point::maximum()) + { + auto exit = finally([this]() { + retry_times = default_retry_times; + }); + + try + { + sync_client->post_sync(dest, payload, deadline); + } + catch (const fc::eof_exception &exc) + { + } + catch (const fc::assert_exception &exc) + { + wlog("Exception while trying to send: ${exc}", ("exc", exc.to_detail_string())); + if (retry_times > 0) + { + wlog("Retries left: ${t}", ("t", retry_times)); + retry_times--; + post_sync(dest, payload, deadline); + } + } + FC_CAPTURE_AND_LOG((dest)(payload)(deadline)) + }; + + std::unique_ptr sync_client; + std::unique_ptr worker; + asio::io_context ioc; + asio::executor_work_guard work_guard; + int64_t default_retry_times = 3; + int64_t retry_times = default_retry_times; +}; +}
// namespace eosio \ No newline at end of file diff --git a/plugins/notify_plugin/include/eosio/notify_plugin/notify_plugin.hpp b/plugins/notify_plugin/include/eosio/notify_plugin/notify_plugin.hpp new file mode 100644 index 00000000000..e2a23a3a74a --- /dev/null +++ b/plugins/notify_plugin/include/eosio/notify_plugin/notify_plugin.hpp @@ -0,0 +1,33 @@ +/** + * @file + * @copyright eospace in eos/LICENSE.txt + */ +#pragma once +#include +#include + +namespace eosio { + +using namespace appbase; +using notify_plugin_ptr = std::unique_ptr; + +/** + * notify_plugin: make notifications to apps on chain. + */ +class notify_plugin : public appbase::plugin { +public: + notify_plugin(); + virtual ~notify_plugin(); + + APPBASE_PLUGIN_REQUIRES((chain_plugin)) + virtual void set_program_options(options_description&, options_description& cfg) override; + + void plugin_initialize(const variables_map& options); + void plugin_startup(); + void plugin_shutdown(); + +private: + notify_plugin_ptr my; +}; + +} diff --git a/plugins/notify_plugin/notify_plugin.cpp b/plugins/notify_plugin/notify_plugin.cpp new file mode 100644 index 00000000000..65e728e9bbe --- /dev/null +++ b/plugins/notify_plugin/notify_plugin.cpp @@ -0,0 +1,350 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include + +namespace eosio +{ +static appbase::abstract_plugin &_notify_plugin = app().register_plugin(); +using namespace chain; +typedef uint32_t action_seq_type; + +class notify_plugin_impl +{ +public: + static const int64_t default_age_limit = 60; + static const int64_t default_retry_times = 3; + static const fc::microseconds http_timeout; + static const fc::microseconds max_deserialization_time; + + fc::url receive_url; + int64_t age_limit = default_age_limit; + int64_t retry_times = default_retry_times; + http_async_client httpc; + + struct sequenced_action : public action + { + sequenced_action(const action &act, action_seq_type seq, account_name receiver) + : action(act), seq_num(seq), receiver(receiver) {} + + action_seq_type seq_num; + account_name receiver; + }; + + struct action_notify + { + action_notify(const sequenced_action &act, transaction_id_type tx_id, + const variant &action_data, fc::time_point block_time, + uint32_t block_num) + : tx_id(tx_id), account(act.account), name(act.name), receiver(act.receiver), + seq_num(act.seq_num), block_time(block_time), block_num(block_num), + authorization(act.authorization), action_data(action_data) {} + + transaction_id_type tx_id; + account_name account; + account_name name; + account_name receiver; + action_seq_type seq_num; + fc::time_point block_time; + uint32_t block_num; + + vector authorization; + fc::variant action_data; + }; + + struct message + { + message() : irreversible(false), actions() {} + bool irreversible; + std::vector actions; + }; + + struct filter_entry + { + name receiver; + name action; + + std::tuple key() const + { + return std::make_tuple(receiver, action); + } + + friend bool operator<(const filter_entry &a, const filter_entry &b) + { + return a.key() < b.key(); + } + }; + + typedef std::unordered_multimap action_queue_type; + + chain_plugin *chain_plug = nullptr; + std::set filter_on; + fc::optional accepted_block_conn; + fc::optional irreversible_block_conn; + fc::optional applied_tx_conn; + action_queue_type action_queue; + action_queue_type irreversible_action_queue; + + bool filter(const action_trace &act); + fc::variant 
deserialize_action_data(action act); + void build_message(message &msg, const block_state_ptr &block, const transaction_id_type &tx_id, bool irreversible); + void send_message(const message &msg); + action_seq_type on_action_trace(const action_trace &act, const transaction_id_type &tx_id, action_seq_type act_s); + void on_applied_tx(const transaction_trace_ptr &trace); + void on_accepted_block(const block_state_ptr &block_state); + void on_irreversible_block(const block_state_ptr &block_state); +}; + +bool notify_plugin_impl::filter(const action_trace &act) +{ + if (filter_on.find({act.receipt.receiver, act.act.name}) != filter_on.end()) + { + return true; + } + else if (filter_on.find({act.receipt.receiver, 0}) != filter_on.end()) + { + return true; + } + return false; +} + +fc::variant notify_plugin_impl::deserialize_action_data(action act) +{ + auto &chain = chain_plug->chain(); + auto serializer = chain.get_abi_serializer(act.account, max_deserialization_time); + FC_ASSERT(serializer.valid() && serializer->get_action_type(act.name) != action_name(), + "Unable to get abi for account: ${acc}, action: ${a} Not sending notification.", + ("acc", act.account)("a", act.name)); + return serializer->binary_to_variant(act.name.to_string(), act.data, max_deserialization_time); +} + +void notify_plugin_impl::build_message(message &msg, const block_state_ptr &block, const transaction_id_type &tx_id, const bool irreversible) +{ + auto range = irreversible ? irreversible_action_queue.equal_range(tx_id) : action_queue.equal_range(tx_id); + + msg.irreversible = irreversible; + for (auto &it = range.first; it != range.second; it++) + { + auto act_data = deserialize_action_data(it->second); + action_notify notify(it->second, tx_id, std::forward(act_data), + block->block->timestamp, block->block->block_num()); + msg.actions.push_back(notify); + } +} + +void notify_plugin_impl::send_message(const message &msg) +{ + try + { + httpc.post(receive_url, msg, fc::time_point::now() + http_timeout); + } + FC_CAPTURE_AND_LOG(("Error while sending notification")(msg)); +} + +action_seq_type notify_plugin_impl::on_action_trace(const action_trace &act, const transaction_id_type &tx_id, + action_seq_type act_s) +{ + if (filter(act)) + { + const auto pair = std::make_pair(tx_id, sequenced_action(act.act, act_s, act.receipt.receiver)); + action_queue.insert(pair); + irreversible_action_queue.insert(pair); + } + act_s++; + + for (const auto &iline : act.inline_traces) + { + act_s = on_action_trace(iline, tx_id, act_s); + } + return act_s; +} + +void notify_plugin_impl::on_applied_tx(const transaction_trace_ptr &trace) +{ + auto id = trace->id; + + if (!action_queue.count(id) || !irreversible_action_queue.count(id)) + { + action_seq_type seq = 0; + for (auto &at : trace->action_traces) + { + seq = on_action_trace(at, id, seq); + } + } +} + +void notify_plugin_impl::on_accepted_block(const block_state_ptr &block_state) +{ + fc::time_point block_time = block_state->block->timestamp; + + if (age_limit == -1 || (fc::time_point::now() - block_time < fc::seconds(age_limit))) + { + message msg; + transaction_id_type tx_id; + for (const auto &trx : block_state->block->transactions) + { + if (trx.trx.contains()) + { + tx_id = trx.trx.get(); + } + else + { + tx_id = trx.trx.get().id(); + } + + if (action_queue.count(tx_id)) + { + build_message(msg, block_state, tx_id, false); + } + } + if (msg.actions.size() > 0) + { + send_message(msg); + } + } + action_queue.clear(); +} + +void notify_plugin_impl::on_irreversible_block(const 
block_state_ptr &block_state) +{ + fc::time_point block_time = block_state->block->timestamp; + if (age_limit == -1 || (fc::time_point::now() - block_time < fc::seconds(age_limit))) + { + message msg; + transaction_id_type tx_id; + for (const auto &trx : block_state->block->transactions) + { + if (trx.trx.contains()) + { + tx_id = trx.trx.get(); + } + else + { + tx_id = trx.trx.get().id(); + } + + if (irreversible_action_queue.count(tx_id)) + { + build_message(msg, block_state, tx_id, true); + } + } + if (msg.actions.size() > 0) + { + send_message(msg); + irreversible_action_queue.clear(); + } + } +} + +const fc::microseconds notify_plugin_impl::http_timeout = fc::seconds(10); +const fc::microseconds notify_plugin_impl::max_deserialization_time = fc::seconds(5); +const int64_t notify_plugin_impl::default_age_limit; +const int64_t notify_plugin_impl::default_retry_times; + +notify_plugin::notify_plugin() : my(new notify_plugin_impl()) {} +notify_plugin::~notify_plugin() {} + +void notify_plugin::set_program_options(options_description &, options_description &cfg) +{ + cfg.add_options()("notify-filter-on", bpo::value>()->composing(), + "Track actions and make notifications then it match receiver:action. In case action is not specified, " + "all actions to specified account are tracked.") + ("notify-receive-url", bpo::value(), "Notify URL which can receive the notifications") + ("notify-age-limit", bpo::value()->default_value(notify_plugin_impl::default_age_limit), + "Age limit in seconds for blocks to send notifications about." + " No age limit if this is set to negative.") + ("notify-retry-times", bpo::value()->default_value(notify_plugin_impl::default_retry_times), + "Retry times of sending http notification if failed.") + ; +} + +void notify_plugin::plugin_initialize(const variables_map &options) +{ + try + { + EOS_ASSERT(options.count("notify-receive-url") == 1, fc::invalid_arg_exception, + "notify_plugin requires one notify-receiver-url to be specified!"); + + EOS_ASSERT(options.count("notify-age-limit") == 1, fc::invalid_arg_exception, + "notify_plugin requires one notify-age-limit to be specified!"); + + EOS_ASSERT(options.count("notify-retry-times") == 1, fc::invalid_arg_exception, + "notify_plugin requires one notify-retry-times to be specified!"); + + string url_str = options.at("notify-receive-url").as(); + my->receive_url = fc::url(url_str); + + if (options.count("notify-filter-on")) + { + auto fo = options.at("notify-filter-on").as>(); + for (auto &s : fo) + { + std::vector v; + boost::split(v, s, boost::is_any_of(":")); + EOS_ASSERT(v.size() == 2, fc::invalid_arg_exception, + "Invalid value ${s} for --notify-filter-on", + ("s", s)); + notify_plugin_impl::filter_entry fe{v[0], v[1]}; + EOS_ASSERT(fe.receiver.value, fc::invalid_arg_exception, "Invalid value ${s} for --notify-filter-on", ("s", s)); + my->filter_on.insert(fe); + } + } + + if (options.count("notify-age-limit")) + my->age_limit = options.at("notify-age-limit").as(); + + if (options.count("notify-retry-times")) + my->retry_times = options.at("notify-retry-times").as(); + + my->httpc.set_default_retry_times(my->retry_times); + my->chain_plug = app().find_plugin(); + auto &chain = my->chain_plug->chain(); + my->accepted_block_conn.emplace(chain.accepted_block.connect( + [&](const block_state_ptr &b_state) { + my->on_accepted_block(b_state); + })); + + my->irreversible_block_conn.emplace(chain.irreversible_block.connect( + [&](const block_state_ptr &bs) { + my->on_irreversible_block(bs); + })); + + 
my->applied_tx_conn.emplace(chain.applied_transaction.connect( + [&](const transaction_trace_ptr &tx) { + my->on_applied_tx(tx); + })); + } + FC_LOG_AND_RETHROW() +} + +void notify_plugin::plugin_startup() +{ + ilog("Notify plugin started"); + my->httpc.start(); +} + +void notify_plugin::plugin_shutdown() +{ + my->applied_tx_conn.reset(); + my->accepted_block_conn.reset(); + my->irreversible_block_conn.reset(); + my->httpc.stop(); +} +} // namespace eosio + +FC_REFLECT(eosio::notify_plugin_impl::action_notify, (tx_id)(account)(name)(seq_num)(receiver)(block_time)(block_num)(authorization)(action_data)) +FC_REFLECT(eosio::notify_plugin_impl::message, (irreversible)(actions)) \ No newline at end of file diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index c46452b5fb4..2d60e92a006 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -1071,8 +1071,13 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool } } + signature_provider_type signature_provider; + if (signature_provider_itr != _signature_providers.end()) { + signature_provider = signature_provider_itr->second; + } + chain.abort_block(); - chain.start_block(block_time, blocks_to_confirm); + chain.start_block(block_time, blocks_to_confirm, signature_provider); } FC_LOG_AND_DROP(); const auto& pbs = chain.pending_block_state(); diff --git a/programs/cleos/httpc.hpp b/programs/cleos/httpc.hpp index 54e60866fbc..850b7bab787 100644 --- a/programs/cleos/httpc.hpp +++ b/programs/cleos/httpc.hpp @@ -105,6 +105,7 @@ namespace eosio { namespace client { namespace http { const string history_func_base = "/v1/history"; const string get_actions_func = history_func_base + "/get_actions"; const string get_transaction_func = history_func_base + "/get_transaction"; + const string get_block_detail_func = history_func_base + "/get_block_detail"; const string get_key_accounts_func = history_func_base + "/get_key_accounts"; const string get_controlled_accounts_func = history_func_base + "/get_controlled_accounts"; diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index b2a2c326353..7124357f158 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -552,12 +552,15 @@ chain::action create_delegate(const name& from, const name& receiver, const asse config::system_account_name, N(delegatebw), act_payload); } -fc::variant regproducer_variant(const account_name& producer, const public_key_type& key, const string& url, uint16_t location) { +fc::variant regproducer_variant(const account_name& producer, const public_key_type& key, const string& url, const string& location) { +auto _location=atoi(location.c_str()); + FC_ASSERT(_location>-12&&_location<=12,"time zone setting is not legal"); + _location=_location>=0?_location:24+_location; return fc::mutable_variant_object() ("producer", producer) ("producer_key", key) ("url", url) - ("location", location) + ("location", _location) ; } @@ -861,14 +864,14 @@ struct register_producer_subcommand { string producer_str; string producer_key_str; string url; - uint16_t loc = 0; + string loc; register_producer_subcommand(CLI::App* actionRoot) { auto register_producer = actionRoot->add_subcommand("regproducer", localized("Register a new producer")); register_producer->add_option("account", producer_str, localized("The account to register as a producer"))->required(); register_producer->add_option("producer_key", producer_key_str, localized("The producer's public 
key"))->required(); register_producer->add_option("url", url, localized("url where info about producer can be found"), true); - register_producer->add_option("location", loc, localized("relative location for purpose of nearest neighbor scheduling"), true); + register_producer->add_option("location", loc, localized("time zone from -11 to 12 "))->required(); add_standard_transaction_options(register_producer); @@ -2146,6 +2149,15 @@ int main( int argc, char** argv ) { std::cout << fc::json::to_pretty_string(call(get_transaction_func, arg)) << std::endl; }); + // get block detail + string block_detail_arg; + auto getBlockDetail = get->add_subcommand("block_detail", localized("Retrieve a full block from the blockchain"), false); + getBlockDetail->add_option("block", block_detail_arg, localized("The number or ID of the block to retrieve"))->required(); + getBlockDetail->set_callback([&block_detail_arg] { + auto arg = fc::mutable_variant_object("block_num_or_id", block_detail_arg); + std::cout << fc::json::to_pretty_string(call(get_block_detail_func, arg)) << std::endl; + }); + // get actions string account_name; string skip_seq_str; diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index 9e1481c23c3..946c8c83200 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -69,6 +69,10 @@ target_link_libraries( ${NODE_EXECUTABLE_NAME} if(BUILD_MONGO_DB_PLUGIN) target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} mongo_db_plugin -Wl,${no_whole_archive_flag} ) endif() +# kafka_plugin +target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} kafka_plugin -Wl,${no_whole_archive_flag} ) +# notify_plugin +target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} notify_plugin -Wl,${no_whole_archive_flag} ) include(additionalPlugins) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 6183b0020d5..b32d2a51315 100644 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -622,6 +622,126 @@ fi printf "\\tWASM found at %s/opt/wasm.\\n" "${HOME}" fi + printf "\\n\\tChecking for librdkafka with support.\\n" + RDKAFKA_DIR=/usr/local/include/librdkafka + if [ ! -d "${RDKAFKA_DIR}" ]; then + # Build librdkafka support: + printf "\\tInstalling librdkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/librdkafka" ]; then + if ! rm -rf "${TEMP_DIR}/librdkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/librdkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b v0.11.6 https://github.com/boscore/librdkafka.git + then + printf "\\tUnable to clone librdkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/" + then + printf "\\tUnable to enter directory %s/librdkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -H. -B_cmake_build + then + printf "\\tError cmake_build librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build + then + printf "\\tError compiling cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build , librdkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! 
cd "${TEMP_DIR}/librdkafka/_cmake_build" + then + printf "\\tUnable to enter directory %s/librdkafka/_cmake_build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tlibrdkafka successffully installed @ %s.\\n\\n" "${RDKAFKA_DIR}" + else + printf "\\t librdkafka found at %s.\\n" "${RDKAFKA_DIR}" + fi + + printf "\\n\\tChecking for cppkafka with support.\\n" + CPPKAFKA_DIR=/usr/local/include/cppkafka + if [ ! -d "${CPPKAFKA_DIR}" ]; then + # Build cppkafka support: + printf "\\tInstalling cppkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/cppkafka" ]; then + if ! rm -rf "${TEMP_DIR}/cppkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/cppkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b 0.2 https://github.com/boscore/cppkafka.git + then + printf "\\tUnable to clone cppkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/" + then + printf "\\tUnable to enter directory %s/cppkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! mkdir build + then + printf "\\tUnable to remove directory build.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/build" + then + printf "\\tUnable to enter directory %s/cppkafka/build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. + then + printf "\\tError compiling cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. , cppkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install cppkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tcppkafka successffully installed @ %s.\\n\\n" "${CPPKAFKA_DIR}" + else + printf "\\t cppkafka found at %s.\\n" "${CPPKAFKA_DIR}" + fi + function print_instructions() { printf "\\n\\t%s -f %s &\\n" "$( command -v mongod )" "${MONGOD_CONF}" diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 652da373244..bedf64f6058 100644 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -686,6 +686,126 @@ mongodconf printf "\\t - WASM found at %s/opt/wasm\\n" "${HOME}" fi + printf "\\n\\tChecking for librdkafka with support.\\n" + RDKAFKA_DIR=/usr/local/include/librdkafka + if [ ! -d "${RDKAFKA_DIR}" ]; then + # Build librdkafka support: + printf "\\tInstalling librdkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/librdkafka" ]; then + if ! rm -rf "${TEMP_DIR}/librdkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/librdkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b v0.11.6 https://github.com/boscore/librdkafka.git + then + printf "\\tUnable to clone librdkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! 
cd "${TEMP_DIR}/librdkafka/" + then + printf "\\tUnable to enter directory %s/librdkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -H. -B_cmake_build + then + printf "\\tError cmake_build librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build + then + printf "\\tError compiling cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build , librdkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/_cmake_build" + then + printf "\\tUnable to enter directory %s/librdkafka/_cmake_build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tlibrdkafka successffully installed @ %s.\\n\\n" "${RDKAFKA_DIR}" + else + printf "\\t librdkafka found at %s.\\n" "${RDKAFKA_DIR}" + fi + + printf "\\n\\tChecking for cppkafka with support.\\n" + CPPKAFKA_DIR=/usr/local/include/cppkafka + if [ ! -d "${CPPKAFKA_DIR}" ]; then + # Build cppkafka support: + printf "\\tInstalling cppkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/cppkafka" ]; then + if ! rm -rf "${TEMP_DIR}/cppkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/cppkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b v0.2 https://github.com/boscore/cppkafka.git + then + printf "\\tUnable to clone cppkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/" + then + printf "\\tUnable to enter directory %s/cppkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! mkdir build + then + printf "\\tUnable to remove directory build.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/build" + then + printf "\\tUnable to enter directory %s/cppkafka/build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. + then + printf "\\tError compiling cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. , cppkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install cppkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tcppkafka successffully installed @ %s.\\n\\n" "${CPPKAFKA_DIR}" + else + printf "\\t cppkafka found at %s.\\n" "${CPPKAFKA_DIR}" + fi + printf "\\n" function print_instructions() diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index a91e1611d44..f3ee81cf467 100644 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -479,6 +479,126 @@ printf "\\tWASM found at /usr/local/wasm/bin/.\\n" fi + printf "\\n\\tChecking for librdkafka with support.\\n" + RDKAFKA_DIR=/usr/local/include/librdkafka + if [ ! -d "${RDKAFKA_DIR}" ]; then + # Build librdkafka support: + printf "\\tInstalling librdkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/librdkafka" ]; then + if ! 
rm -rf "${TEMP_DIR}/librdkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/librdkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b v0.11.6 https://github.com/boscore/librdkafka.git + then + printf "\\tUnable to clone librdkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/" + then + printf "\\tUnable to enter directory %s/librdkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -H. -B_cmake_build + then + printf "\\tError cmake_build librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build + then + printf "\\tError compiling cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build , librdkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/_cmake_build" + then + printf "\\tUnable to enter directory %s/librdkafka/_cmake_build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tlibrdkafka successffully installed @ %s.\\n\\n" "${RDKAFKA_DIR}" + else + printf "\\t librdkafka found at %s.\\n" "${RDKAFKA_DIR}" + fi + + printf "\\n\\tChecking for cppkafka with support.\\n" + CPPKAFKA_DIR=/usr/local/include/cppkafka + if [ ! -d "${CPPKAFKA_DIR}" ]; then + # Build cppkafka support: + printf "\\tInstalling cppkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/cppkafka" ]; then + if ! rm -rf "${TEMP_DIR}/cppkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/cppkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b 0.2 https://github.com/boscore/cppkafka.git + then + printf "\\tUnable to clone cppkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/" + then + printf "\\tUnable to enter directory %s/cppkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! mkdir build + then + printf "\\tUnable to remove directory build.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/build" + then + printf "\\tUnable to enter directory %s/cppkafka/build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. + then + printf "\\tError compiling cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. , cppkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! 
sudo make install + then + printf "\\tUnable to make install cppkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tcppkafka successffully installed @ %s.\\n\\n" "${CPPKAFKA_DIR}" + else + printf "\\t cppkafka found at %s.\\n" "${CPPKAFKA_DIR}" + fi + function print_instructions() { printf "\\n\\t%s -f %s &\\n" "$( command -v mongod )" "${MONGOD_CONF}" diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 35cad3d7d8e..238781f2f8f 100644 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -492,6 +492,126 @@ printf "\\n\\tWASM found @ %s/opt/wasm\\n\\n" "${HOME}" fi + printf "\\n\\tChecking for librdkafka with support.\\n" + RDKAFKA_DIR=/usr/local/include/librdkafka + if [ ! -d "${RDKAFKA_DIR}" ]; then + # Build librdkafka support: + printf "\\tInstalling librdkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/librdkafka" ]; then + if ! rm -rf "${TEMP_DIR}/librdkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/librdkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b v0.11.6 https://github.com/boscore/librdkafka.git + then + printf "\\tUnable to clone librdkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/" + then + printf "\\tUnable to enter directory %s/librdkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -H. -B_cmake_build + then + printf "\\tError cmake_build librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build + then + printf "\\tError compiling cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build , librdkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/_cmake_build" + then + printf "\\tUnable to enter directory %s/librdkafka/_cmake_build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tlibrdkafka successffully installed @ %s.\\n\\n" "${RDKAFKA_DIR}" + else + printf "\\t librdkafka found at %s.\\n" "${RDKAFKA_DIR}" + fi + + printf "\\n\\tChecking for cppkafka with support.\\n" + CPPKAFKA_DIR=/usr/local/include/cppkafka + if [ ! -d "${CPPKAFKA_DIR}" ]; then + # Build cppkafka support: + printf "\\tInstalling cppkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/cppkafka" ]; then + if ! rm -rf "${TEMP_DIR}/cppkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/cppkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b 0.2 https://github.com/boscore/cppkafka.git + then + printf "\\tUnable to clone cppkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/" + then + printf "\\tUnable to enter directory %s/cppkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! 
mkdir build + then + printf "\\tUnable to remove directory build.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/build" + then + printf "\\tUnable to enter directory %s/cppkafka/build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. + then + printf "\\tError compiling cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. , cppkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install cppkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tcppkafka successffully installed @ %s.\\n\\n" "${CPPKAFKA_DIR}" + else + printf "\\t cppkafka found at %s.\\n" "${CPPKAFKA_DIR}" + fi + function print_instructions() { printf "\\n\\t%s -f %s &\\n" "$( command -v mongod )" "${MONGOD_CONF}" diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 4c9873a60a1..ab3e8823d8e 100644 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -503,6 +503,126 @@ mongodconf printf "\\tWASM found at %s/opt/wasm/bin.\\n" "${HOME}" fi + printf "\\n\\tChecking for librdkafka with support.\\n" + RDKAFKA_DIR=/usr/local/include/librdkafka + if [ ! -d "${RDKAFKA_DIR}" ]; then + # Build librdkafka support: + printf "\\tInstalling librdkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/librdkafka" ]; then + if ! rm -rf "${TEMP_DIR}/librdkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/librdkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b v0.11.6 https://github.com/boscore/librdkafka.git + then + printf "\\tUnable to clone librdkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/" + then + printf "\\tUnable to enter directory %s/librdkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -H. -B_cmake_build + then + printf "\\tError cmake_build librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build + then + printf "\\tError compiling cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build , librdkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/_cmake_build" + then + printf "\\tUnable to enter directory %s/librdkafka/_cmake_build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tlibrdkafka successffully installed @ %s.\\n\\n" "${RDKAFKA_DIR}" + else + printf "\\t librdkafka found at %s.\\n" "${RDKAFKA_DIR}" + fi + + printf "\\n\\tChecking for cppkafka with support.\\n" + CPPKAFKA_DIR=/usr/local/include/cppkafka + if [ ! -d "${CPPKAFKA_DIR}" ]; then + # Build cppkafka support: + printf "\\tInstalling cppkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/cppkafka" ]; then + if ! rm -rf "${TEMP_DIR}/cppkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 
0\\n" "${TEMP_DIR}/cppkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b 0.2 https://github.com/boscore/cppkafka.git + then + printf "\\tUnable to clone cppkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/" + then + printf "\\tUnable to enter directory %s/cppkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! mkdir build + then + printf "\\tUnable to remove directory build.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/build" + then + printf "\\tUnable to enter directory %s/cppkafka/build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. + then + printf "\\tError compiling cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. , cppkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install cppkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tcppkafka successffully installed @ %s.\\n\\n" "${CPPKAFKA_DIR}" + else + printf "\\t cppkafka found at %s.\\n" "${CPPKAFKA_DIR}" + fi + function print_instructions() { printf '\n\texport PATH=${HOME}/opt/mongodb/bin:$PATH\n' diff --git a/unittests/actiondemo/actiondemo.abi b/unittests/actiondemo/actiondemo.abi new file mode 100644 index 00000000000..172d180bfef --- /dev/null +++ b/unittests/actiondemo/actiondemo.abi @@ -0,0 +1,99 @@ +{ + "____comment": "This file was generated by eosio-abigen. DO NOT EDIT - 2018-11-17T13:26:02", + "version": "eosio::abi/1.0", + "types": [], + "structs": [{ + "name": "seedobj", + "base": "", + "fields": [{ + "name": "id", + "type": "uint64" + },{ + "name": "create", + "type": "time_point" + },{ + "name": "seedstr", + "type": "string" + },{ + "name": "txid", + "type": "string" + },{ + "name": "action", + "type": "uint64" + } + ] + },{ + "name": "args", + "base": "", + "fields": [{ + "name": "loop", + "type": "uint64" + },{ + "name": "num", + "type": "uint64" + } + ] + },{ + "name": "generate", + "base": "", + "fields": [{ + "name": "t", + "type": "args" + } + ] + },{ + "name": "clear", + "base": "", + "fields": [] + },{ + "name": "args_inline", + "base": "", + "fields": [{ + "name": "payer", + "type": "name" + },{ + "name": "in", + "type": "name" + } + ] + },{ + "name": "inlineact", + "base": "", + "fields": [{ + "name": "t", + "type": "args_inline" + } + ] + } + ], + "actions": [{ + "name": "generate", + "type": "generate", + "ricardian_contract": "" + },{ + "name": "clear", + "type": "clear", + "ricardian_contract": "" + },{ + "name": "inlineact", + "type": "inlineact", + "ricardian_contract": "" + } + ], + "tables": [{ + "name": "seedobjs", + "index_type": "i64", + "key_names": [ + "id" + ], + "key_types": [ + "uint64" + ], + "type": "seedobj" + } + ], + "ricardian_clauses": [], + "error_messages": [], + "abi_extensions": [], + "variants": [] +} \ No newline at end of file diff --git a/unittests/actiondemo/actiondemo.cpp b/unittests/actiondemo/actiondemo.cpp new file mode 100644 index 00000000000..3f8a3fcb6e0 --- /dev/null +++ b/unittests/actiondemo/actiondemo.cpp @@ -0,0 +1,106 @@ +#include "actiondemo.hpp" +#include "../../contracts/eosiolib/print.hpp" +#include "../../contracts/eosiolib/types.hpp" +#include "../../contracts/eosiolib/transaction.hpp" + +namespace spaceaction { + + void actiondemo::apply( account_name code, account_name act ) { + + if( 
code != _self ) + return; + + switch( act ) { + case N(generate): + generate(unpack_action_data()); + return; + case N(inlineact): + inlineact(unpack_action_data()); + case N(clear): + clear(); + return; + } + } + + void actiondemo::clear(){ + //require_auth(_self); + seedobjs table(_self, _self); + auto iter = table.begin(); + while (iter != table.end()) + { + table.erase(iter); + iter = table.begin(); + } + } + + std::string to_hex( const char* d, uint32_t s ) + { + std::string r; + const char* to_hex="0123456789abcdef"; + uint8_t* c = (uint8_t*)d; + for( uint32_t i = 0; i < s; ++i ) + (r += to_hex[(c[i]>>4)]) += to_hex[(c[i] &0x0f)]; + return r; + } + + void actiondemo::generate(const args& t){ + for (int i = 0; i < t.loop; ++i) { + transaction_id_type txid; + get_transaction_id(&txid); + std::string tx = to_hex((char*)&txid.hash, 32); + + uint64_t seq = 0; + get_action_sequence(&seq); + + + size_t szBuff = sizeof(signature); + char buf[szBuff]; + memset(buf,0,szBuff); + size_t size = bpsig_action_time_seed(buf, sizeof(buf)); + eosio_assert(size > 0 && size <= sizeof(buf), "buffer is too small"); + std::string seedstr = to_hex(buf,size); + + + seedobjs table(_self, _self); + uint64_t count = 0; + for (auto itr = table.begin(); itr != table.end(); ++itr) { + ++count; + } + + auto r = table.emplace(_self, [&](auto &a) { + a.id = count + 1; + a.create = eosio::time_point_sec(now()); + a.seedstr = seedstr; + a.txid = tx; + a.action = seq; + }); + print_f("self:%, loop:%, count:%, seedstr:%", name{_self}.to_string(), t.loop, count, r->seedstr); + } + } + + void actiondemo::inlineact(const args_inline& t){ + auto& payer = t.payer; + args gen; + gen.loop = 1; + gen.num = 1; + + generate(gen); + + if(t.in != 0) + { + INLINE_ACTION_SENDER(spaceaction::actiondemo, generate)( t.in, {payer,N(active)}, + { gen}); + INLINE_ACTION_SENDER(spaceaction::actiondemo, generate)( t.in, {payer,N(active)}, + { gen}); + } + + } +} + +extern "C" { +[[noreturn]] void apply(uint64_t receiver, uint64_t code, uint64_t action) { + spaceaction::actiondemo obj(receiver); + obj.apply(code, action); + eosio_exit(0); +} +} \ No newline at end of file diff --git a/unittests/actiondemo/actiondemo.hpp b/unittests/actiondemo/actiondemo.hpp new file mode 100644 index 00000000000..e1d5031bfa5 --- /dev/null +++ b/unittests/actiondemo/actiondemo.hpp @@ -0,0 +1,50 @@ +#pragma once +#include +#include + +namespace spaceaction { + + using namespace eosio; + class actiondemo : public contract { + typedef std::chrono::milliseconds duration; + public: + actiondemo( account_name self ):contract(self){} + + void apply( account_name contract, account_name act ); + + struct args{ + uint64_t loop; + uint64_t num; + }; + //@abi action + void generate(const args& t); + + //@abi action + void clear(); + + + struct args_inline{ + account_name payer; + account_name in; + }; + //@abi action + void inlineact(const args_inline& t); + + public: + // @abi table seedobjs i64 + struct seedobj { + uint64_t id; + time_point create; + std::string seedstr; + std::string txid; + uint64_t action; + + uint64_t primary_key()const { return id; } + EOSLIB_SERIALIZE(seedobj,(id)(create)(seedstr)(txid)(action)) + }; + typedef eosio::multi_index< N(seedobjs), seedobj> seedobjs; + + + }; + +} /// namespace eosio diff --git a/unittests/actiondemo/test.py b/unittests/actiondemo/test.py new file mode 100644 index 00000000000..5ced2b4276c --- /dev/null +++ b/unittests/actiondemo/test.py @@ -0,0 +1,223 @@ +#!/usr/bin/env python3 + +import argparse +import json + +import 
os + +import subprocess + +import time + +args = None +logFile = None + +unlockTimeout = 999999 + +systemAccounts = [ + 'eosio.bpay', + 'eosio.msig', + 'eosio.names', + 'eosio.ram', + 'eosio.ramfee', + 'eosio.saving', + 'eosio.stake', + 'eosio.token', + 'eosio.vpay', +] + + +def jsonArg(a): + return " '" + json.dumps(a) + "' " + +def run(args): + print('testtool.py:', args) + logFile.write(args + '\n') + if subprocess.call(args, shell=True): + print('testtool.py: exiting because of error') + #sys.exit(1) + +def retry(args): + while True: + print('testtool.py:', args) + logFile.write(args + '\n') + if subprocess.call(args, shell=True): + print('*** Retry') + else: + break + +def background(args): + print('testtool.py:', args) + logFile.write(args + '\n') + return subprocess.Popen(args, shell=True) + +def getOutput(args): + print('testtool.py:', args) + logFile.write(args + '\n') + proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE) + return proc.communicate()[0].decode('utf-8') + +def getJsonOutput(args): + print('testtool.py:', args) + logFile.write(args + '\n') + proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE) + return json.loads(proc.communicate()[0].decode('utf-8')) + +def sleep(t): + print('sleep', t, '...') + time.sleep(t) + print('resume') + +def startWallet(): + run('rm -rf ' + os.path.abspath(args.wallet_dir)) + run('mkdir -p ' + os.path.abspath(args.wallet_dir)) + background(args.keosd + ' --unlock-timeout %d --http-server-address 127.0.0.1:6666 --wallet-dir %s' % (unlockTimeout, os.path.abspath(args.wallet_dir))) + sleep(4) + run(args.cleos + 'wallet create --file ./unlock.key ' ) + +def importKeys(): + run(args.cleos + 'wallet import --private-key ' + args.private_key) + +# def createStakedAccounts(b, e): +# for i in range(b, e): +# a = accounts[i] +# stake = 100 +# run(args.cleos + 'system newaccount eosio --transfer ' + a['name'] + ' ' + a['pub'] + ' --stake-net "' + stake + '" --stake-cpu "' + stake + '"') + + +def stepStartWallet(): + startWallet() + importKeys() + # run('rm -rf ~/.local/share/eosio/nodeos/data ') + run("rm -rf ./data/*") + background(args.nodeos + ' -e -p eosio --blocks-dir ./data/block/ --genesis-json %s --config-dir ./ --data-dir ./data/ --plugin eosio::http_plugin --plugin eosio::chain_api_plugin --plugin eosio::producer_plugin --plugin eosio::history_api_plugin> eos.log 2>&1 &' % args.genesis) + run("rm -rf ./data2/*") + background(args.nodeos + ' --blocks-dir ./data2/block/ --genesis-json %s --data-dir ./data2/ --config-dir ./ --p2p-peer-address 127.0.0.1:9876 --http-server-address 0.0.0.0:8001 --p2p-listen-endpoint 0.0.0.0:9001 --plugin eosio::http_plugin --plugin eosio::chain_api_plugin --plugin eosio::producer_plugin --plugin eosio::history_api_plugin > eos2.log 2>&1 &' % args.genesis) + sleep(30) + + +def createAccounts(): + for a in systemAccounts: + run(args.cleos + 'create account eosio ' + a + ' ' + args.public_key) + run(args.cleos + 'set contract eosio.token ' + args.contracts_dir + 'eosio.token/') + run(args.cleos + 'set contract eosio.msig ' + args.contracts_dir + 'eosio.msig/') + run(args.cleos + 'push action eosio.token create \'["eosio", "10000000000.0000 %s"]\' -p eosio.token' % (args.symbol)) + run(args.cleos + 'push action eosio.token issue \'["eosio", "%s %s", "memo"]\' -p eosio' % ("1000000.0000", args.symbol)) + retry(args.cleos + 'set contract eosio ' + args.contracts_dir + 'eosio.system/ -p eosio') + sleep(1) + run(args.cleos + 'push action eosio setpriv' + jsonArg(['eosio.msig', 1]) + '-p 
eosio@active') + + for a in accounts: + run(args.cleos + 'system newaccount --stake-net "10.0000 %s" --stake-cpu "10.0000 %s" --buy-ram-kbytes 80 eosio ' %(args.symbol,args.symbol) + a + ' ' + args.public_key) + + run(args.cleos + 'system newaccount --stake-net "10.0000 %s" --stake-cpu "10.0000 %s" --buy-ram-kbytes 80 eosio '%(args.symbol,args.symbol) + 'cochaintoken' + ' ' + args.public_key) + + run(args.cleos + 'system buyram eosio %s -k 80000 -p eosio ' % args.contract ) + run(args.cleos + 'system delegatebw eosio %s "1000.0000 SYS" "1000.0000 SYS"'% args.contract ) + + run(args.cleos + 'system buyram eosio %s -k 80000 -p eosio ' % args.contract2 ) + run(args.cleos + 'system delegatebw eosio %s "1000.0000 SYS" "1000.0000 SYS"'% args.contract2 ) + +# stepIssueToken() +# +# +# def stepIssueToken(): +# run(args.cleos + 'push action eosio.token issue \'["eosio", "%s %s", "memo"]\' -p eosio' % ("1000000.0000", args.symbol)) +# for i in accounts: +# run(args.cleos + 'push action eosio.token issue \'["%s", "%s %s", "memo"]\' -p eosio' % (i, "1000000.0000", args.symbol)) +# +# sleep(1) + + +def stepKillAll(): + run('killall keosd nodeos || true') + sleep(1.5) +# Command Line Arguments + +def stepInitCaee(): + print ("=========================== set contract caee ===========================" ) + run(args.cleos + 'set contract %s ../actiondemo' %args.contract ) + run(args.cleos + 'set contract %s ../actiondemo' %args.contract2 ) + run(args.cleos + 'set account permission %s active \'{"threshold": 1,"keys": [{"key": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV","weight": 1}],"accounts": [{"permission":{"actor":"%s","permission":"eosio.code"},"weight":1}]}\' ' % (args.contract,args.contract)) + print ("sleep 5") + + +def stepClear(): + print ("=========================== set contract clear ===========================" ) + run(args.cleos + 'push action %s clear "[]" -p %s ' %(args.contract, args.contract)) + run(args.cleos + 'get table %s %s seedobjs' %(args.contract, args.contract) ) + run(args.cleos + 'push action %s clear "[]" -p %s ' %(args.contract2, args.contract2)) + run(args.cleos + 'get table %s %s seedobjs' %(args.contract2, args.contract2) ) + print ("sleep 5") + + +def stepGenerate(): + print ("=========================== set contract stepGenerate ===========================" ) + # run(args.cleos + 'push action %s generate \'[{"loop":1, "num":1}]\' -p %s ' %(args.contract, args.contract)) + run(args.cleos + 'push action %s inlineact \'[{"payer":"%s", "in":"%s"}]\' -p %s ' %(args.contract,args.contract,args.contract2, args.contract)) + run(args.cleos + 'get table %s %s seedobjs' %(args.contract, args.contract) ) + run(args.cleos + 'get table %s %s seedobjs' %(args.contract2, args.contract2) ) + print ("sleep 5") + + +parser = argparse.ArgumentParser() + +commands = [ + ('k', 'kill', stepKillAll, True, ""), + ('w', 'wallet', stepStartWallet, True, "Start keosd, create wallet"), + ('s', 'sys', createAccounts, True, "Create all accounts"), + ('i', 'init', stepInitCaee, True, "stepInitCaee"), + ('c', 'clear', stepClear, True, "stepInitCaee"), + ('g', 'generate', stepGenerate, True, "stepInitCaee"), +] + +parser.add_argument('--public-key', metavar='', help="EOSIO Public Key", default='EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV', dest="public_key") +parser.add_argument('--private-Key', metavar='', help="EOSIO Private Key", default='5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3', dest="private_key") +parser.add_argument('--cleos', metavar='', help="Cleos command", 
default='../../build/programs/cleos/cleos --wallet-url http://127.0.0.1:6666 ') +parser.add_argument('--nodeos', metavar='', help="Path to nodeos binary", default='../../build/programs/nodeos/nodeos ') +parser.add_argument('--keosd', metavar='', help="Path to keosd binary", default='../../build/programs/keosd/keosd ') +parser.add_argument('--contracts-dir', metavar='', help="Path to contracts directory", default='../../build/contracts/') +parser.add_argument('--nodes-dir', metavar='', help="Path to nodes diretodctory", default='./') +parser.add_argument('--genesis', metavar='', help="Path to genesis.json", default="./genesis.json") +parser.add_argument('--wallet-dir', metavar='', help="Path to wallet directory", default='./wallet/') +parser.add_argument('--log-path', metavar='', help="Path to log file", default='./output.log') +# parser.add_argument('--symbol', metavar='', help="The eosio.system symbol", default='SYS') +parser.add_argument('-a', '--all', action='store_true', help="Do everything marked with (*)") +#parser.add_argument('-H', '--http-port', type=int, default=8888, metavar='', help='HTTP port for cleos') + +for (flag, command, function, inAll, help) in commands: + prefix = '' + if inAll: prefix += '*' + if prefix: help = '(' + prefix + ') ' + help + if flag: + parser.add_argument('-' + flag, '--' + command, action='store_true', help=help, dest=command) + else: + parser.add_argument('--' + command, action='store_true', help=help, dest=command) + +args = parser.parse_args() + +args.cleos += '--url http://127.0.0.1:8888 ' +args.symbol = 'SYS' +args.contract = 'caee' +args.contract2 = 'caee2' + + +accnum = 26 +accounts = [] +# for i in range(97,97+accnum): +# accounts.append("user%c"% chr(i)) +# accounts.append("payman") +accounts.append(args.contract) +accounts.append(args.contract2) + +logFile = open(args.log_path, 'a') +logFile.write('\n\n' + '*' * 80 + '\n\n\n') + +haveCommand = False +for (flag, command, function, inAll, help) in commands: + if getattr(args, command) or inAll and args.all: + if function: + haveCommand = True + function() +if not haveCommand: + print('testtool.py: Tell me what to do. -a does almost everything. 
-h shows options.')
\ No newline at end of file
diff --git a/unittests/database_gmr_blklst_tests.cpp b/unittests/database_gmr_blklst_tests.cpp
new file mode 100644
index 00000000000..f448ba5a172
--- /dev/null
+++ b/unittests/database_gmr_blklst_tests.cpp
@@ -0,0 +1,309 @@
+/**
+ *  @file
+ *  @copyright defined in eos/LICENSE.txt
+ */
+
+#include <boost/test/unit_test.hpp>
+#include <boost/algorithm/string.hpp>
+#include <boost/filesystem.hpp>
+#include <eosio/chain/controller.hpp>
+#include <eosio/chain/global_property_object.hpp>
+#include <eosio/chain/config_xos.hpp>
+#include <eosio/testing/tester.hpp>
+
+#ifdef NON_VALIDATING_TEST
+#define TESTER tester
+#else
+#define TESTER validating_tester
+#endif
+
+using namespace eosio::chain;
+using namespace eosio::testing;
+namespace bfs = boost::filesystem;
+
+BOOST_AUTO_TEST_SUITE(database_gmr_blklst_tests)
+
+vector<account_name> parse_list_string(string items)
+{
+    vector<account_name> item_list;
+    vector<string> itemlist;
+    boost::split(itemlist, items, boost::is_any_of(","));
+    for (string item : itemlist)
+    {
+        item_list.push_back(string_to_name(item.c_str()));
+    }
+
+    return item_list;
+}
+
+// Parsing a comma-separated string should yield the expected account names
+BOOST_AUTO_TEST_CASE(list_config_parse_test)
+{
+    try
+    {
+        TESTER test;
+
+        string str = "alice,bob,tom";
+        vector<account_name> list = parse_list_string(str);
+        BOOST_TEST(list.size() > 0);
+        account_name n = N(a);
+        if (list.size() > 0)
+        {
+            n = *(list.begin());
+        }
+
+        BOOST_TEST(n != N(a));
+        BOOST_TEST(n == N(alice));
+    }
+    FC_LOG_AND_RETHROW()
+}
+
+// set_name_list() should update both the controller caches and the persisted
+// global_property2_object
+BOOST_AUTO_TEST_CASE(set_name_list_test)
+{
+    try
+    {
+        TESTER test;
+        // Bypass read-only restriction on state DB access for this unit test which really needs to mutate the DB to properly conduct its test.
+        chainbase::database &db = const_cast<chainbase::database &>(test.control->db());
+
+        auto ses = db.start_undo_session(true);
+
+        string str = "alice,bob,tom";
+        vector<account_name> list = parse_list_string(str);
+
+        flat_set<account_name> nameset(list.begin(), list.end());
+
+        test.control->set_actor_blacklist(nameset);
+
+        // Fetch the persisted second global property object
+        const global_property2_object &ptr = db.get<global_property2_object>();
+
+        // Seed each on-chain name list with a single entry
+        db.modify(ptr, [&](global_property2_object &a) {
+            a.cfg.actor_blacklist = {N(a)};
+            a.cfg.contract_blacklist = {N(a)};
+            a.cfg.resource_greylist = {N(a)};
+        });
+
+        int64_t lt = static_cast<int64_t>(list_type::actor_blacklist_type);
+        int64_t lat = static_cast<int64_t>(list_action_type::insert_type);
+        test.control->set_name_list(lt, lat, list);
+
+        const flat_set<account_name>& ab = test.control->get_actor_blacklist();
+        const flat_set<account_name>& cb = test.control->get_contract_blacklist();
+        const flat_set<account_name>& rg = test.control->get_resource_greylist();
+
+        auto convert_names = [&](const shared_vector<account_name>& namevec, flat_set<uint64_t>& nameset) -> void {
+            for (const auto& a : namevec)
+            {
+                nameset.insert(uint64_t(a));
+            }
+        };
+
+        flat_set<uint64_t> aab;
+        flat_set<uint64_t> acb;
+        flat_set<uint64_t> arg;
+
+        const global_property2_object &ptr1 = db.get<global_property2_object>();
+        chain_config2 c = ptr1.cfg;
+
+        // the seeded entry plus alice, bob and tom
+        BOOST_TEST(c.actor_blacklist.size() == 4);
+        BOOST_TEST(ab.size() == 4);
+
+        convert_names(c.actor_blacklist, aab);
+        convert_names(c.contract_blacklist, acb);
+        convert_names(c.resource_greylist, arg);
+
+        if (c.actor_blacklist.size() == 4)
+        {
+            bool b = (aab.find(N(a)) != aab.end());
+            BOOST_TEST(b);
+        }
+
+        bool d = ab.find(N(a)) != ab.end();
+        BOOST_TEST(d);
+        bool m = aab.find(N(alice)) != aab.end();
+        BOOST_TEST(m);
+
+        // Undo the changes made through this session
+        ses.undo();
+    }
+    FC_LOG_AND_RETHROW()
+}
+
+// cfg.actor_blacklist should round-trip through the database
+BOOST_AUTO_TEST_CASE(actor_blacklist_config_test)
+{
+    try
+    {
+        TESTER test;
+        // Bypass read-only restriction on state DB access for this unit test which really needs to mutate the DB to properly conduct its test.
+        chainbase::database &db = const_cast<chainbase::database &>(test.control->db());
+
+        auto ses = db.start_undo_session(true);
+
+        // string str = "alice,bob,tom";
+        // vector<account_name> list = parse_list_string(str);
+
+        // Fetch the persisted second global property object
+        const global_property2_object &ptr = db.get<global_property2_object>();
+
+        // Seed the actor blacklist with a single entry
+        db.modify(ptr, [&](global_property2_object &a) {
+            a.cfg.actor_blacklist = {N(a)};
+        });
+
+        chain_config2 a = ptr.cfg;
+
+        account_name v;
+        if (a.actor_blacklist.size() > 0)
+        {
+            v = *(a.actor_blacklist.begin());
+        }
+
+        std::size_t s = a.actor_blacklist.size();
+
+        BOOST_TEST(1 == s);
+
+        BOOST_TEST(v == N(a));
+
+        // Undo the changes made through this session
+        ses.undo();
+    }
+    FC_LOG_AND_RETHROW()
+}
+
+// cfg.contract_blacklist should round-trip through the database
+BOOST_AUTO_TEST_CASE(contract_blacklist_config_test)
+{
+    try
+    {
+        TESTER test;
+        // Bypass read-only restriction on state DB access for this unit test which really needs to mutate the DB to properly conduct its test.
+        chainbase::database &db = const_cast<chainbase::database &>(test.control->db());
+
+        auto ses = db.start_undo_session(true);
+
+        // string str = "alice,bob,tom";
+        // vector<account_name> list = parse_list_string(str);
+
+        // Fetch the persisted second global property object
+        const global_property2_object &ptr = db.get<global_property2_object>();
+
+        // Seed the contract blacklist with a single entry
+        db.modify(ptr, [&](global_property2_object &a) {
+            a.cfg.contract_blacklist = {N(a)};
+        });
+
+        chain_config2 a = ptr.cfg;
+
+        account_name v;
+        if (a.contract_blacklist.size() > 0)
+        {
+            v = *(a.contract_blacklist.begin());
+        }
+
+        std::size_t s = a.contract_blacklist.size();
+
+        BOOST_TEST(1 == s);
+
+        BOOST_TEST(v == N(a));
+
+        // Undo the changes made through this session
+        ses.undo();
+    }
+    FC_LOG_AND_RETHROW()
+}
+
+// cfg.resource_greylist should round-trip through the database
+BOOST_AUTO_TEST_CASE(resource_greylist_config_test)
+{
+    try
+    {
+        TESTER test;
+        // Bypass read-only restriction on state DB access for this unit test which really needs to mutate the DB to properly conduct its test.
+        chainbase::database &db = const_cast<chainbase::database &>(test.control->db());
+
+        auto ses = db.start_undo_session(true);
+
+        // string str = "alice,bob,tom";
+        // vector<account_name> list = parse_list_string(str);
+
+        // Fetch the persisted second global property object
+        const global_property2_object &ptr = db.get<global_property2_object>();
+
+        // Seed the resource greylist with a single entry
+        db.modify(ptr, [&](global_property2_object &a) {
+            a.cfg.resource_greylist = {N(a)};
+        });
+
+        chain_config2 a = ptr.cfg;
+
+        account_name v;
+        if (a.resource_greylist.size() > 0)
+        {
+            v = *(a.resource_greylist.begin());
+        }
+
+        std::size_t s = a.resource_greylist.size();
+
+        BOOST_TEST(1 == s);
+
+        BOOST_TEST(v == N(a));
+
+        // Undo the changes made through this session
+        ses.undo();
+    }
+    FC_LOG_AND_RETHROW()
+}
+
+// gmr (guaranteed minimum resource) parameters should round-trip through the database
+BOOST_AUTO_TEST_CASE(gmrource_limit_config_test)
+{
+    try
+    {
+        TESTER test;
+        // Bypass read-only restriction on state DB access for this unit test which really needs to mutate the DB to properly conduct its test.
+        chainbase::database &db = const_cast<chainbase::database &>(test.control->db());
+
+        auto ses = db.start_undo_session(true);
+
+        // Fetch the persisted second global property object
+        const global_property2_object &ptr = db.get<global_property2_object>();
+
+        // Set the guaranteed minimum resource parameters
+        db.modify(ptr, [&](global_property2_object &a) {
+            a.gmr.cpu_us = 100;
+            a.gmr.net_byte = 1024;
+            a.gmr.ram_byte = 1;
+        });
+
+        BOOST_TEST(ptr.gmr.cpu_us == 100);
+        BOOST_TEST(ptr.gmr.net_byte == 1024);
+        BOOST_TEST(ptr.gmr.ram_byte == 1);
+
+        // Undo the changes made through this session
+        ses.undo();
+    }
+    FC_LOG_AND_RETHROW()
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/unittests/database_tests.cpp b/unittests/database_tests.cpp
index ac97f6c21a6..8f9d3553928 100644
--- a/unittests/database_tests.cpp
+++ b/unittests/database_tests.cpp
@@ -28,7 +28,8 @@ BOOST_AUTO_TEST_SUITE(database_tests)
       TESTER test;
 
       // Bypass read-only restriction on state DB access for this unit test which really needs to mutate the DB to properly conduct its test.
-      eosio::chain::database& db = const_cast<eosio::chain::database&>( test.control->db() );
+      // eosio::chain::database& db = const_cast<eosio::chain::database&>( test.control->db() );
+      chainbase::database &db = const_cast<chainbase::database &>(test.control->db());
 
       auto ses = db.start_undo_session(true);
diff --git a/unittests/gmr_test.cpp b/unittests/gmr_test.cpp
new file mode 100644
index 00000000000..3874552d0e5
--- /dev/null
+++ b/unittests/gmr_test.cpp
@@ -0,0 +1,234 @@
+#include <algorithm>
+#include <boost/test/unit_test.hpp>
+#include <eosio/chain/config.hpp>
+#include <eosio/chain/config_xos.hpp>
+#include <eosio/chain/resource_limits.hpp>
+#include <eosio/testing/chainbase_fixture.hpp>
+
+#include <eosio/testing/tester.hpp>
+#ifdef NON_VALIDATING_TEST
+#define TESTER tester
+#else
+#define TESTER validating_tester
+#endif
+
+using namespace eosio::chain::resource_limits;
+using namespace eosio::testing;
+using namespace eosio::chain;
+
+class gmr_fixture : private chainbase_fixture<512 * 1024>, public resource_limits_manager
+{
+  public:
+    gmr_fixture()
+        : chainbase_fixture(), resource_limits_manager(*chainbase_fixture::_db)
+    {
+        add_indices();
+        initialize_database();
+    }
+
+    ~gmr_fixture() {}
+
+    chainbase::database::session start_session()
+    {
+        return chainbase_fixture::_db->start_undo_session(true);
+    }
+};
+
+BOOST_AUTO_TEST_SUITE(gmr_test)
+
+BOOST_FIXTURE_TEST_CASE(check_block_limits_cpu, gmr_fixture)
+try
+{
+    const account_name account(1);
+    const uint64_t increment = 10000;
+    initialize_account(account);
+    set_account_limits(account, 1000, 0, 0);
+    initialize_account(N(dan));
+    initialize_account(N(everyone));
+    set_account_limits(N(dan), 0, 0, 10000);
+    set_account_limits(N(everyone), 0, 0, 10000000000000ll);
+
+    process_account_limit_updates();
+
+    // uint16_t gmrource_limit_per_day = 100;
+
+    // Bypass read-only restriction on state DB access for this unit test which really needs to mutate the DB to properly conduct its test.
+ + // test.control->startup(); + + // // Make sure we can no longer find + + const uint64_t expected_iterations = config::default_gmr_cpu_limit / increment; + + for (int idx = 0; idx < expected_iterations-1; idx++) + { + add_transaction_usage({account}, increment, 0, 0); + process_block_usage(idx); + } + + auto arl = get_account_cpu_limit_ex(account, true); + + BOOST_TEST(arl.available >= 9997); + BOOST_REQUIRE_THROW(add_transaction_usage({account}, increment, 0, 0), block_resource_exhausted); +} +FC_LOG_AND_RETHROW(); + +BOOST_FIXTURE_TEST_CASE(check_block_limits_cpu_lowerthan, gmr_fixture) +try +{ + const account_name account(1); + const uint64_t increment = 10000; + initialize_account(account); + set_account_limits(account, increment, 0, 0); + initialize_account(N(dan)); + initialize_account(N(everyone)); + set_account_limits(N(dan), 0, 0, 10000); + set_account_limits(N(everyone), 0, 0, 10000000000000ll); + process_account_limit_updates(); + + const uint64_t expected_iterations = config::default_gmr_cpu_limit / increment; + + for (int idx = 0; idx < expected_iterations-1; idx++) + { + add_transaction_usage({account}, increment, 0, 0); + process_block_usage(idx); + } + + auto arl = get_account_cpu_limit_ex(account, true); + BOOST_TEST(arl.available >= 9997); + + // BOOST_REQUIRE_THROW(add_transaction_usage({account}, increment, 0, 0), block_resource_exhausted); +} +FC_LOG_AND_RETHROW(); + +BOOST_FIXTURE_TEST_CASE(check_block_limits_net_lowerthan, gmr_fixture) +try +{ + const account_name account(1); + const uint64_t increment = 1000; + initialize_account(account); + set_account_limits(account, increment, 0, 0); + initialize_account(N(dan)); + initialize_account(N(everyone)); + set_account_limits(N(dan), 0, 10000, 0); + set_account_limits(N(everyone), 0, 10000000000000ll, 0); + process_account_limit_updates(); + + const uint64_t expected_iterations = config::default_gmr_net_limit / increment; + + for (int idx = 0; idx < expected_iterations-1; idx++) + { + add_transaction_usage({account}, 0, increment, 0); + process_block_usage(idx); + } + + auto arl = get_account_net_limit_ex(account, true); + BOOST_TEST(arl.available >= 1238); + + // BOOST_REQUIRE_THROW(add_transaction_usage({account}, 0,increment, 0), block_resource_exhausted); +} +FC_LOG_AND_RETHROW(); + +BOOST_FIXTURE_TEST_CASE(check_block_limits_ram, gmr_fixture) +try +{ + set_gmr_parameters( + { 1024, 200000,10240} + ); + + const account_name account(1); + const uint64_t increment = 1000; + initialize_account(account); + set_account_limits(account, increment, 10, 10); + initialize_account(N(dan)); + initialize_account(N(everyone)); + set_account_limits(N(dan), 0, 10000, 0); + set_account_limits(N(everyone), 0, 10000000000000ll, 0); + process_account_limit_updates(); + + const uint64_t expected_iterations = config::default_gmr_net_limit / increment; + + //for ( + int idx = 0;// idx < expected_iterations-1; idx++) + { + add_transaction_usage({account}, 0, increment, 0); + process_block_usage(idx); + } + + auto arl = get_account_net_limit_ex(account, true); + BOOST_TEST(arl.available >= 0); + + int64_t ram_bytes; + int64_t net_weight; + int64_t cpu_weight; + bool raw = false; + get_account_limits(account, ram_bytes, net_weight, cpu_weight, raw); + + BOOST_TEST(1024*2 == ram_bytes); + BOOST_TEST(10 == net_weight); + BOOST_TEST(10 == cpu_weight); + + + raw = true; + get_account_limits(account, ram_bytes, net_weight, cpu_weight, raw); + + BOOST_TEST(1024 == ram_bytes); + BOOST_TEST(10 == net_weight); + BOOST_TEST(10 == cpu_weight); + + 
// BOOST_REQUIRE_THROW(add_transaction_usage({account}, 0,increment, 0), block_resource_exhausted); +} +FC_LOG_AND_RETHROW(); + + + +BOOST_FIXTURE_TEST_CASE(get_account_limits_res, gmr_fixture) +try +{ + const account_name account(1); + const uint64_t increment = 1000; + initialize_account(account); + set_account_limits(account, increment+24, 0, 0); + initialize_account(N(dan)); + initialize_account(N(everyone)); + set_account_limits(N(dan), 0, 10000, 0); + set_account_limits(N(everyone), 0, 10000000000000ll, 0); + process_account_limit_updates(); + + const uint64_t expected_iterations = config::default_gmr_net_limit / increment; + + for (int idx = 0; idx < expected_iterations-1; idx++) + { + add_transaction_usage({account}, 0, increment, 0); + process_block_usage(idx); + } + + auto arl = get_account_net_limit_ex(account, true); + BOOST_TEST(arl.available > 0); + + int64_t ram_bytes; + int64_t net_weight; + int64_t cpu_weight; + bool raw = false; + get_account_limits(account, ram_bytes, net_weight, cpu_weight, raw); + + BOOST_TEST(1024 == ram_bytes); + BOOST_TEST(0 == net_weight); + BOOST_TEST(0 == cpu_weight); + + + raw = true; + get_account_limits(account, ram_bytes, net_weight, cpu_weight, raw); + + BOOST_TEST(1024 == ram_bytes); + BOOST_TEST(0 == net_weight); + BOOST_TEST(0 == cpu_weight); + + + // BOOST_REQUIRE_THROW(add_transaction_usage({account}, 0,increment, 0), block_resource_exhausted); +} +FC_LOG_AND_RETHROW(); + + +BOOST_AUTO_TEST_SUITE_END() From 100ecf46a1d875d17ebc5e3682626f866d2a7f5e Mon Sep 17 00:00:00 2001 From: thaipanda <45444502+Thaipanda@users.noreply.github.com> Date: Wed, 12 Dec 2018 19:53:58 +0800 Subject: [PATCH 02/21] Release/1.0.x (#11) * boscore basic improvement (#2) * kafka_plugin code * Automatic installation librdkafka/cppkafka * Feature/ci * Feature/48 kafka plugin * add CMakeModules/FindCppkafka.cmake * Production of block in time zone sequence * P2p self discovery * P2p self discovery * add notify_plugin * add api "get_block_detail" * add free res limit and blklst code * update free res limit and blklst code * update res code * update unittest code * revert submodule version * code typo * update blklist code * update sync name list db object error code * update code * update index code * Feature/5 ramdom * Revert "Merge branch 'feature/5-ramdom' into 'develop'" This reverts merge request !8 * adjust for setup BOSCore * change description * adjust the kafka plugin dependency be more special * use boscore repository to improve security * change version tag * finish for docker/builder * pass to build docker and update readme * add actionseed, global action sequence (#5) * delete renamed old file * BOSCore v1.0.1-1.4.3 * restructure the version schema * fix __gmpn_set_str error when build bos.contract * prepare for the v1.0.1 * add README files * update info --- README.md | 40 ++++++++++++++++++++++++---------------- README_CN.md | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 16 deletions(-) create mode 100644 README_CN.md diff --git a/README.md b/README.md index d1a66f70a0e..cf1f8585ee5 100644 --- a/README.md +++ b/README.md @@ -1,29 +1,37 @@ -# BOSCore - 更可用的链,为DApp而生。Born for DApp, be more useable. +# BOSCore - Born for DApp, be more useable. 
 ## BOSCore Version: v1.0.1
-### Basic EOSIO Version: v1.4.4
+### Basic EOSIO Version: v1.4.3
 
-# 背景
-EOS的出现给区块链带来了新的想象力,主网启动短短几个月以来,版本经历了几十次升级,不仅稳定性得到了很大提高,并且新功能也逐步实现,各个节点团队也积极参与建设EOSIO生态。让人更加兴奋的是,EOS已经吸引了越来越多的开发团队,当前已经有数百个DApp在EOS主网上面运行,其交易量和流通市值远超以太坊,可发展的空间愈来愈广阔。
-在EOS主网逐渐发展的过程中,我们发现了一些偏离期望的地方。作为最有竞争力的第三代公链,大家希望看到的是能够有更多、更丰富的应用能够在EOS上面运行,开发者会将EOS作为自己应用开发的首选平台,但是由于目前EOS的资源模型的限制,导致了很高的使用成本,包括为用户创建更多的账户,以及部署运营DApp需要的较高成本。针对白皮书中要实现的上百万TPS需要的关键技术IBC,一直没有进行推进,主网多次出现CPU计算资源不足的情况,更是加剧了对跨链通讯需求的迫切性。此外,由于EOSIO采用的Pipeline-DPOS共识机制,一个交易需要近三分钟才能保证不可更改,虽然相较比特币、以太坊是有很大的进步,但是这也给EOS的应用场景带来很大限制,快速支付只能聚焦于小额转账,大额转账必须要等待足够长的时间才能保证不可更改,这就限制了链上、链下用户支付体验。
-除了上面提到的情况,还有很多其他改进想法一直在我们社区进行活跃的讨论,由此,我们觉得应该基于EOS进行更多的尝试,让更多的开发者或者团队来参与到EOSIO生态的建设中来,一起为区块链在不同行业不同场景中的落地做出一份努力。BOS作为一条完全由社区维护的EOS侧链,在继承其良好功能的基础上,会进行更多的尝试,并且会将经过验证的新特性、新功能反哺给EOSIO生态。
+# Background
+The emergence of EOS has brought new imagination to the blockchain. In just a few months since the main network was launched, the software has undergone dozens of upgrades: not only has stability improved greatly, but new functions have also been realized step by step. Node teams are actively involved in building the EOSIO ecosystem. What is even more exciting is that EOS has attracted more and more development teams; there are already hundreds of DApps running on the EOS main network, their transaction volume and circulating market value far exceed Ethereum's, and the space for development keeps growing broader.
+During the gradual development of the EOS main network, we found some deviations from expectations. As the most competitive third-generation public chain, EOS should see more and richer applications running on it, with developers choosing it as the preferred platform for application development. But the limitations of the current EOS resource model lead to high usage costs, both for creating accounts for users and for deploying and operating a DApp. IBC, the key technology needed to reach the millions of TPS promised in the white paper, has not been advanced, and the main network has repeatedly run short of CPU computing resources, which makes the demand for cross-chain communication all the more urgent. In addition, because of the Pipeline-DPOS consensus algorithm adopted by EOSIO, a transaction takes nearly three minutes to become irreversible. Although this is a great improvement over Bitcoin and Ethereum, it still restricts many EOS application scenarios: fast payment can only cover small transfers, while large transfers must wait long enough to become irreversible, which limits the payment experience both on-chain and off-chain.
+Beyond the issues mentioned above, many other improvements have been actively discussed in our community. From this we feel that we should experiment more on the basis of EOS and let more developers and teams take part in building the EOSIO ecosystem, working together to help the blockchain land in different scenarios across different industries. As a fully community-maintained EOS side chain, BOS will make more attempts on top of the good functions it inherits, and will feed proven new features and functions back to the EOSIO ecosystem.
 
-# 概述
-BOS致力于为用户提供方便进入并易于使用的区块链服务,为DApp运营提供更友好的基础设施,为支持更丰富的应用场景努力,为DApp大繁荣进行积极尝试。除了技术改进以外,BOS也会进行其他方面的尝试。比如,为了提高用户投票参与度,可以通过预言机技术来针对符合明确规则的账户进行激励;BOS上面的BP的奖励会根据链上DApp的数量、TPS、市值、流通量等指标进行调整,鼓励每个BP为生态提供更多资源;一项社区公投达成的决议将会尽量被代码化,减少人为的因素在里面,流程上链,保持公正透明。
-BOS链的代码完全由社区贡献并维护,每个生态参与者都可以提交代码或者建议,相关的流程会参考已有开源软件来进行,比如PEP(Python Enhancement Proposals)。
-为鼓励DApp在BOS的发展,BOS基金会将会为其上的DApp提供Token置换的低成本的资源抵押服务,降低DApp前期的运营成本;此外还会定期对做出贡献的开发者或者功能验证者提供BOS激励,以便建立起一个相互促进的社区发展趋势。
+# Overview
+BOS is committed to providing users with easy-to-access and easy-to-use blockchain services, providing a more friendly infrastructure for DApp operations, working to support richer application scenarios, and actively preparing for the coming DApp boom. Beyond technical improvements, BOS will also experiment in other areas. For example, to increase user participation in voting, oracle technology can be used to reward accounts that meet clearly defined rules; BP rewards on BOS will be adjusted according to indicators such as the number of DApps on the chain, TPS, market value and liquidity, encouraging every BP to provide more resources for the ecosystem; and a resolution reached by community referendum will be turned into code wherever possible, reducing human factors, keeping the process on chain, and maintaining fairness and transparency.
+The code of the BOS chain is fully contributed and maintained by the community. Every ecosystem participant can submit code or suggestions, following a process modeled on existing open-source projects such as PEP (Python Enhancement Proposals).
+To encourage the development of DApps on BOS, the BOS Foundation will provide low-cost resource-staking services via token swap for DApps on BOS, reducing their operating costs in the early stage; in addition, it will regularly reward contributing developers with BOS, so as to establish a mutually reinforcing trend of community development.
 
-## 资源
+# Developer Rewards
+
+An additional 0.8% issuance will be awarded each year to developers who contribute code to the BOS ecosystem. Fifty candidates are nominated by the community, and the top 50 BPs then vote 40 winners to receive the awards: the top 10 share 40%, those ranked 11 to 20 share 30%, and the remaining 20 share the final 30% evenly. Rewards are granted once every 3 months; each reward list is published for one week, is re-evaluated if a reasonable objection is raised, and is recorded on chain.
+As BOS continues to develop, developer rewards will be adjusted appropriately so that the community provides more momentum for the evolution of BOS.
+
+## Links
 1. [Website](https://boscore.io)
-2. [Developer Telegram Group](https://t.me/BOSCoreProject)
+2. [Developer Telegram Group](https://t.me/BOSCoreDev)
+3. [WhitePaper](https://github.com/boscore/Documentation/blob/master/BOSCoreTechnicalWhitePaper.md)
+4. [白皮书](https://github.com/boscore/Documentation/blob/master/zh-CN/BOSCoreTechnicalWhitePaper.md)
 
-## 开始
-1. 源码直接编译: `bash ./eosio_build.sh`
-2. Docker方式部署,参看 [Docker](./Docker/README.md)
+## Start
+1. Build from source: `bash ./eosio_build.sh -s BOS`
+2. Docker-based deployment, see [Docker](./Docker/README.md)
 
-BOSCore是基于EOSIO技术的扩展,所以EOSIO的相关资料也可以参考:
+BOSCore is based on EOSIO, so you can also refer to:
 
 [Getting Started](https://developers.eos.io/eosio-nodeos/docs/overview-1)
 
 [EOSIO Developer Portal](https://developers.eos.io).
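An editorial aside on the reward split quoted in the README diff above: the short C++ sketch below spells out the per-developer shares. The 100-unit pool is an arbitrary stand-in for one year's 0.8% developer issuance, used purely for illustration; none of this code comes from the repository.

```cpp
#include <cstdio>

int main() {
    // Treat one year's 0.8% developer issuance as a pool of 100 units.
    const double pool = 100.0;
    const double top10_each  = pool * 0.40 / 10;  // ranks 1-10:  4.0 units each
    const double mid10_each  = pool * 0.30 / 10;  // ranks 11-20: 3.0 units each
    const double last20_each = pool * 0.30 / 20;  // ranks 21-40: 1.5 units each
    std::printf("%.1f / %.1f / %.1f\n", top10_each, mid10_each, last20_each);
    return 0;
}
```

In other words, a top-10 contributor receives 4% of the annual developer pool, while each of the last 20 winners receives 1.5%.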
+
diff --git a/README_CN.md b/README_CN.md
new file mode 100644
index 00000000000..83d97b25226
--- /dev/null
+++ b/README_CN.md
@@ -0,0 +1,36 @@
+# BOSCore - 更可用的链,为DApp而生。
+
+## BOSCore Version: v1.0.1
+### Basic EOSIO Version: v1.4.3
+
+# 背景
+EOS的出现给区块链带来了新的想象力,主网启动短短几个月以来,版本经历了几十次升级,不仅稳定性得到了很大提高,并且新功能也逐步实现,各个节点团队也积极参与建设EOSIO生态。让人更加兴奋的是,EOS已经吸引了越来越多的开发团队,当前已经有数百个DApp在EOS主网上面运行,其交易量和流通市值远超以太坊,可发展的空间愈来愈广阔。
+在EOS主网逐渐发展的过程中,我们发现了一些偏离期望的地方。作为最有竞争力的第三代公链,大家希望看到的是能够有更多、更丰富的应用能够在EOS上面运行,开发者会将EOS作为自己应用开发的首选平台,但是由于目前EOS的资源模型的限制,导致了很高的使用成本,包括为用户创建更多的账户,以及部署运营DApp需要的较高成本。针对白皮书中要实现的上百万TPS需要的关键技术IBC,一直没有进行推进,主网多次出现CPU计算资源不足的情况,更是加剧了对跨链通讯需求的迫切性。此外,由于EOSIO采用的Pipeline-DPOS共识机制,一个交易需要近三分钟才能保证不可更改,虽然相较比特币、以太坊是有很大的进步,但是这也给EOS的应用场景带来很大限制,快速支付只能聚焦于小额转账,大额转账必须要等待足够长的时间才能保证不可更改,这就限制了链上、链下用户支付体验。
+除了上面提到的情况,还有很多其他改进想法一直在我们社区进行活跃的讨论,由此,我们觉得应该基于EOS进行更多的尝试,让更多的开发者或者团队来参与到EOSIO生态的建设中来,一起为区块链在不同行业不同场景中的落地做出一份努力。BOS作为一条完全由社区维护的EOS侧链,在继承其良好功能的基础上,会进行更多的尝试,并且会将经过验证的新特性、新功能反哺给EOSIO生态。
+
+# 概述
+BOS致力于为用户提供方便进入并易于使用的区块链服务,为DApp运营提供更友好的基础设施,为支持更丰富的应用场景努力,为DApp大繁荣进行积极尝试。除了技术改进以外,BOS也会进行其他方面的尝试。比如,为了提高用户投票参与度,可以通过预言机技术来针对符合明确规则的账户进行激励;BOS上面的BP的奖励会根据链上DApp的数量、TPS、市值、流通量等指标进行调整,鼓励每个BP为生态提供更多资源;一项社区公投达成的决议将会尽量被代码化,减少人为的因素在里面,流程上链,保持公正透明。
+BOS链的代码完全由社区贡献并维护,每个生态参与者都可以提交代码或者建议,相关的流程会参考已有开源软件来进行,比如PEP(Python Enhancement Proposals)。
+为鼓励DApp在BOS的发展,BOS基金会将会为其上的DApp提供Token置换的低成本的资源抵押服务,降低DApp前期的运营成本;此外还会定期对做出贡献的开发者提供BOS激励,以便建立起一个相互促进的社区发展趋势。
+
+# 开发者激励
+每年增发 0.8% 面向BOS生态贡献代码的开发者,由社区提出50名奖励名单,由前50名BP投票选出40名的获奖者获取对应奖励:前10名获取40%,11到20名获取30%,最后20名均分30%,奖励周期3个月一次,每次奖励名额都会进行为期一周的公示,如果有合理异议,将会重新评审,每次奖励名单都会上链记录。
+随着BOS的不断发展,开发者奖励会适当调整,让社区为BOS的进化提供更多动力。
+
+
+## 资源
+1. [官网](https://boscore.io)
+2. [开发者社群](https://t.me/BOSCoreDev)
+3. [WhitePaper](https://github.com/boscore/Documentation/blob/master/BOSCoreTechnicalWhitePaper.md)
+4. [白皮书](https://github.com/boscore/Documentation/blob/master/zh-CN/BOSCoreTechnicalWhitePaper.md)
+
+## 开始
+1. 源码直接编译: `bash ./eosio_build.sh -s BOS`
+2. Docker方式部署,参看 [Docker](./Docker/README.md)
+
+BOSCore是基于EOSIO技术的扩展,所以EOSIO的相关资料也可以参考:
+
+[EOSIO 开始](https://developers.eos.io/eosio-nodeos/docs/overview-1)
+
+[EOSIO 开发者门户](https://developers.eos.io).
+

From b7d7bdb885a36e854480501eee249e6a199497a7 Mon Sep 17 00:00:00 2001
From: Wirte Code <45449488+wirtecode@users.noreply.github.com>
Date: Wed, 12 Dec 2018 19:54:45 +0800
Subject: [PATCH 03/21] readme for kafka & add time for action (#5)

---
 plugins/kafka_plugin/kafka.cpp |  1 +
 plugins/kafka_plugin/readme.md | 31 +++++++++++++++++++++++++++++++
 plugins/kafka_plugin/types.hpp |  3 ++-
 3 files changed, 34 insertions(+), 1 deletion(-)
 create mode 100644 plugins/kafka_plugin/readme.md

diff --git a/plugins/kafka_plugin/kafka.cpp b/plugins/kafka_plugin/kafka.cpp
index 7eee52b8386..86abbb6d62c 100644
--- a/plugins/kafka_plugin/kafka.cpp
+++ b/plugins/kafka_plugin/kafka.cpp
@@ -148,6 +148,7 @@ void kafka::push_action(const chain::action_trace& action_trace, uint64_t parent
     a->code_seq = action_trace.receipt.code_sequence;
     a->abi_seq = action_trace.receipt.abi_sequence;
     a->block_num = action_trace.block_num;
+    a->block_time = action_trace.block_time;
     a->tx_id = checksum_bytes(action_trace.trx_id);
 
     if (not action_trace.console.empty()) a->console = action_trace.console;
diff --git a/plugins/kafka_plugin/readme.md b/plugins/kafka_plugin/readme.md
new file mode 100644
index 00000000000..ecde9474053
--- /dev/null
+++ b/plugins/kafka_plugin/readme.md
@@ -0,0 +1,31 @@
+# Kafka Plugin Notes
+
+### 1. Topics
+
+The following four topics are enabled by default:
+
+    1. blocks             // its block field is the JSON structure persisted from the complete block data, i.e. a full copy.
+    2. transaction
+    3. transaction_trace
+    4. action
+
+    transaction, transaction_trace and action are parsed from data inside nodeos and carry the main fields a consumer is likely to need (in effect a recommended configuration); users may add or remove fields as required. Alternatively, these three topics can be removed altogether, relying only on the full data in blocks.
+
+    See: `plugins/kafka_plugin/types.hpp`
+
+
+### 2. FAQ
+
+#### Common build error for bos on macOS
+```
+Could not find a package configuration file provided by "RdKafka" with any of the following names:
+  RdKafkaConfig.cmake
+  rdkafka-config.cmake
+```
+
+Cause: the kafka version installed on the system is too old.
+
+Fix:
+
+    Delete the two directories `/usr/local/include/cppkafka` and `/usr/local/include/librdkafka`,
+    then restart the bos build (a compatible kafka version will be downloaded and installed automatically).
\ No newline at end of file
diff --git a/plugins/kafka_plugin/types.hpp b/plugins/kafka_plugin/types.hpp
index a419f4b8c2b..65f4091fb1f 100644
--- a/plugins/kafka_plugin/types.hpp
+++ b/plugins/kafka_plugin/types.hpp
@@ -73,6 +73,7 @@ struct Action {
     unsigned abi_seq;
 
     uint32_t block_num;
+    block_timestamp_type block_time;
     bytes tx_id; // the transaction that generated this action
 
     string console;
@@ -90,4 +91,4 @@ FC_REFLECT_ENUM(kafka::TransactionStatus, (executed)(soft_fail)(hard_fail)(delay
 FC_REFLECT(kafka::Block, (id)(num)(timestamp)(lib)(block)(tx_count)(action_count)(context_free_action_count))
 FC_REFLECT(kafka::Transaction, (id)(block_id)(block_num)(block_time)(block_seq)(action_count)(context_free_action_count))
 FC_REFLECT(kafka::TransactionTrace, (id)(block_num)(scheduled)(status)(net_usage_words)(cpu_usage_us)(exception))
-FC_REFLECT(kafka::Action, (global_seq)(recv_seq)(parent_seq)(account)(name)(auth)(data)(receiver)(auth_seq)(code_seq)(abi_seq)(block_num)(tx_id)(console))
+FC_REFLECT(kafka::Action, (global_seq)(recv_seq)(parent_seq)(account)(name)(auth)(data)(receiver)(auth_seq)(code_seq)(abi_seq)(block_num)(block_time)(tx_id)(console))

From 4260006abad338e7cff35e6ce5cfc6fb742bfd42 Mon Sep 17 00:00:00 2001
From: vlbos <45447465+vlbos@users.noreply.github.com>
Date: Wed, 12 Dec 2018 19:55:06 +0800
Subject: [PATCH 04/21] Restart node: blacklist no longer takes effect, fixes #7 (#8)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* restart sync list db

* recovery system account bos to eosio

* recovery system account bos to eosio

* recovery system account bos to eosio
---
 libraries/chain/controller.cpp | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index 6bea61766be..cb824cf45ba 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -394,7 +394,11 @@ struct controller_impl {
          ilog( "database initialized with hash: ${hash}", ("hash", hash) );
       }
 
-
+      //*bos begin*
+      sync_name_list(list_type::actor_blacklist_type,true);
+      sync_name_list(list_type::contract_blacklist_type,true);
+      sync_name_list(list_type::resource_greylist_type,true);
+      //*bos end*
    }
 
    ~controller_impl() {
@@ -649,9 +653,7 @@ struct controller_impl {
         gpo.gmr.ram_byte = config::default_gmr_ram_limit;
       });
 
-      sync_name_list(list_type::actor_blacklist_type,true);
-      sync_name_list(list_type::contract_blacklist_type,true);
-      sync_name_list(list_type::resource_greylist_type,true);
+
       // *bos end*
 
       authorization.initialize_database();

From 6935bf6f51aa678fbb64249eb82383bb66f53ef9 Mon Sep 17 00:00:00 2001
From: montecarlomind <45424199+montecarlomind@users.noreply.github.com>
Date: Wed, 12 Dec 2018 20:00:17 +0800
Subject: [PATCH 05/21] Fix/#3 notify plugin (#10)

* Add debug info

* comment log

* rm log for notify_plugin
---
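Before the next patch's file list, a brief editorial note on the kafka_plugin additions above: a downstream service might read the plugin's `action` topic with cppkafka, the client library the plugin builds against (per FindCppkafka.cmake). The sketch below is an illustration under stated assumptions, not code from the repository: the broker address and group id are placeholders, and it assumes each payload is a JSON-serialized `kafka::Action` record as reflected in types.hpp.

```cpp
#include <cppkafka/cppkafka.h>
#include <iostream>

int main() {
    // Broker address and consumer group are placeholders, not values from the patch.
    cppkafka::Configuration config = {
        { "metadata.broker.list", "127.0.0.1:9092" },
        { "group.id", "action-readers" }
    };

    cppkafka::Consumer consumer(config);
    consumer.subscribe({ "action" });   // one of the four default topics listed in the readme

    while (true) {
        cppkafka::Message msg = consumer.poll();
        if (!msg) continue;             // poll() timed out, nothing to do
        if (msg.get_error()) continue;  // skip partition EOFs and transport errors for brevity
        // Payload is assumed to be a JSON-serialized kafka::Action (see types.hpp).
        std::cout << msg.get_payload() << std::endl;
    }
}
```

The same loop works for the `blocks`, `transaction`, and `transaction_trace` topics; only the subscribed topic name and the expected record shape change.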
 plugins/notify_plugin/README.md         | 22 +++++++++++-----------
 plugins/notify_plugin/notify_plugin.cpp | 11 ++++++++++-
 2 files changed, 21 insertions(+), 12 deletions(-)

diff --git a/plugins/notify_plugin/README.md b/plugins/notify_plugin/README.md
index 35080a4962d..b962c6e0ead 100644
--- a/plugins/notify_plugin/README.md
+++ b/plugins/notify_plugin/README.md
@@ -11,7 +11,7 @@ Add some configs to your `config.ini` just as follows:
 plugin = eosio::notify_plugin
 # notify-filter-on = account:action
 notify-filter-on = b1:
-notify-filter-on = noprom:transfer
+notify-filter-on = b1:transfer
 notify-filter-on = eosio:delegatebw
 # http endpoint for each action seen on the chain.
 notify-receive-url = http://127.0.0.1:8080/notify
@@ -32,38 +32,38 @@ And you can receive the actions on chain by watching your server endpoint: `http
       "account": "eosio.token",
       "name": "transfer",
       "seq_num": 1,
-      "receiver": "noprom",
+      "receiver": "b1",
       "block_time": "2018-09-29T11:51:06.000",
       "block_num": 127225,
       "authorization": [{
-          "actor": "noprom",
+          "actor": "b1",
           "permission": "active"
         }
       ],
       "action_data": {
-        "from": "noprom",
-        "to": "noprom1",
+        "from": "b1",
+        "to": "b11",
         "quantity": "0.0001 EOS",
-        "memo": "Transfer from noprom to xiaoming"
+        "memo": "Transfer from b1 to b11"
       }
     },{
       "tx_id": "b31885bada6c2d5e71b1302e87d4006c59ff2a40a12108559d76142548d8cf79",
       "account": "eosio.token",
       "name": "transfer",
       "seq_num": 2,
-      "receiver": "noprom1",
+      "receiver": "b11",
       "block_time": "2018-09-29T11:51:06.000",
       "block_num": 127225,
       "authorization": [{
-          "actor": "noprom",
+          "actor": "b1",
           "permission": "active"
         }
       ],
       "action_data": {
-        "from": "noprom",
-        "to": "noprom1",
+        "from": "b1",
+        "to": "b11",
         "quantity": "0.0001 EOS",
-        "memo": "Transfer from noprom to xiaoming"
+        "memo": "Transfer from b1 to b11"
       }
     }
   ]
diff --git a/plugins/notify_plugin/notify_plugin.cpp b/plugins/notify_plugin/notify_plugin.cpp
index 65e728e9bbe..377daf8e5ef 100644
--- a/plugins/notify_plugin/notify_plugin.cpp
+++ b/plugins/notify_plugin/notify_plugin.cpp
@@ -134,6 +134,7 @@ fc::variant notify_plugin_impl::deserialize_action_data(action act)
 void notify_plugin_impl::build_message(message &msg, const block_state_ptr &block,
                                        const transaction_id_type &tx_id, const bool irreversible)
 {
+  // dlog("irreversible: ${a}", ("a", fc::json::to_pretty_string(irreversible)));
  auto range = irreversible ? irreversible_action_queue.equal_range(tx_id)
                            : action_queue.equal_range(tx_id);
 
  msg.irreversible = irreversible;
@@ -163,6 +164,7 @@ action_seq_type notify_plugin_impl::on_action_trace(const action_trace &act, con
       const auto pair = std::make_pair(tx_id, sequenced_action(act.act, act_s, act.receipt.receiver));
       action_queue.insert(pair);
       irreversible_action_queue.insert(pair);
+      // dlog("on_action_trace: ${a}", ("a", fc::json::to_pretty_string(act.act)));
     }
     act_s++;
 
@@ -195,6 +197,7 @@ void notify_plugin_impl::on_accepted_block(const block_state_ptr &block_state)
 {
   message msg;
   transaction_id_type tx_id;
+  // dlog("block_state->block->transactions: ${a}", ("a", fc::json::to_pretty_string(block_state->block->transactions)));
   for (const auto &trx : block_state->block->transactions)
   {
     if (trx.trx.contains<transaction_id_type>())
@@ -206,11 +209,14 @@ void notify_plugin_impl::on_accepted_block(const block_state_ptr &block_state)
       tx_id = trx.trx.get<packed_transaction>().id();
     }
 
+    // dlog("tx_id: ${a}", ("a", fc::json::to_pretty_string(tx_id)));
+    // dlog("action_queue.size(): ${a}", ("a", fc::json::to_pretty_string(action_queue.size())));
     if (action_queue.count(tx_id))
     {
       build_message(msg, block_state, tx_id, false);
     }
   }
+  // dlog("msg: ${a}", ("a", msg));
   if (msg.actions.size() > 0)
   {
     send_message(msg);
@@ -226,6 +232,7 @@ void notify_plugin_impl::on_irreversible_block(const block_state_ptr &block_stat
 {
   message msg;
   transaction_id_type tx_id;
+  // dlog("block_state->block->transactions: ${a}", ("a", fc::json::to_pretty_string(block_state->block->transactions)));
   for (const auto &trx : block_state->block->transactions)
   {
     if (trx.trx.contains<transaction_id_type>())
@@ -236,12 +243,14 @@ void notify_plugin_impl::on_irreversible_block(const block_state_ptr &block_stat
     {
       tx_id = trx.trx.get<packed_transaction>().id();
     }
-
+    // dlog("tx_id: ${a}", ("a", fc::json::to_pretty_string(tx_id)));
+    // dlog("irreversible_action_queue.size(): ${a}", ("a", fc::json::to_pretty_string(irreversible_action_queue.size())));
     if (irreversible_action_queue.count(tx_id))
     {
       build_message(msg, block_state, tx_id, true);
     }
   }
+  // dlog("msg: ${a}", ("a", msg));
   if (msg.actions.size() > 0)
   {
     send_message(msg);

From a8de458d004b75f160d934c8ff4ff9f74c4f2feb Mon Sep 17 00:00:00 2001
From: thaipanda <45444502+Thaipanda@users.noreply.github.com>
Date: Thu, 13 Dec 2018 20:49:57 +0800
Subject: [PATCH 06/21] merge v1.0.2 (#13)

* boscore basic improvement (#2)
* kafka_plugin code
* Automatic installation librdkafka/cppkafka
* Feature/ci
* Feature/48 kafka plugin
* add CMakeModules/FindCppkafka.cmake
* Production of block in time zone sequence
* P2p self discovery
* P2p self discovery
* add notify_plugin
* add api "get_block_detail"
* add free res limit and blklst code
* update free res limit and blklst code
* update res code
* update unittest code
* revert submodule version
* code typo
* update blklist code
* update sync name list db object error code
* update code
* update index code
* Feature/5 ramdom
* Revert "Merge branch 'feature/5-ramdom' into 'develop'"

This reverts merge request !8
* adjust for setup BOSCore
* change description
* adjust the kafka plugin dependency be more special
* use boscore repository to improve security
* change version tag
* finish for docker/builder
* pass to build docker and update readme
* add actionseed, global action sequence (#5)
* delete renamed old file
* BOSCore v1.0.1-1.4.3
* restructure the version schema
* fix __gmpn_set_str error when build bos.contract
* prepare for the v1.0.1
* add README files
* update info
* prepare for v1.0.2
---
 CMakeLists.txt    | 2 +-
 Docker/README.md  | 4 ++--
 README.md         | 4 ++--
 README_CN.md      | 4 ++--
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index b1511b82a0d..1451955870c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -35,7 +35,7 @@ set( CXX_STANDARD_REQUIRED ON)
 
 set(VERSION_MAJOR 1)
 set(VERSION_MINOR 0)
-set(VERSION_PATCH 1)
+set(VERSION_PATCH 2)
 
 set( CLI_CLIENT_EXECUTABLE_NAME cleos )
 set( NODE_EXECUTABLE_NAME nodeos )
diff --git a/Docker/README.md b/Docker/README.md
index bbaabec7438..9ff75404b7d 100644
--- a/Docker/README.md
+++ b/Docker/README.md
@@ -20,10 +20,10 @@ cd bos/Docker
 docker build . -t boscore/bos
 ```
 
-The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.0.1 tag, you could do the following:
+The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.0.2 tag, you could do the following:
 
 ```bash
-docker build -t boscore/bos:v1.0.1 --build-arg branch=v1.0.1 .
+docker build -t boscore/bos:v1.0.2 --build-arg branch=v1.0.2 .
 ```
 
 By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image.
diff --git a/README.md b/README.md
index cf1f8585ee5..43fcdfa2d61 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
 # BOSCore - Born for DApp, be more useable.
 
-## BOSCore Version: v1.0.1
+## BOSCore Version: v1.0.2
-### Basic EOSIO Version: v1.4.3
+### Basic EOSIO Version: v1.4.4
 
 # Background
 The emergence of EOS has brought new imagination to the blockchain. In just a few months since the main network was launched, the software has undergone dozens of upgrades: not only has stability improved greatly, but new functions have also been realized step by step. Node teams are actively involved in building the EOSIO ecosystem. What is even more exciting is that EOS has attracted more and more development teams; there are already hundreds of DApps running on the EOS main network, their transaction volume and circulating market value far exceed Ethereum's, and the space for development keeps growing broader.
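Returning briefly to the notify_plugin fix in PATCH 05 above: the plugin buffers each matched action in multimaps keyed by transaction id (action_queue and irreversible_action_queue) and then assembles one message per transaction by walking equal_range(tx_id). The standalone sketch below is an editorial illustration of that grouping pattern only, with the plugin's types reduced to plain strings.

```cpp
#include <iostream>
#include <map>
#include <string>

int main() {
    // Stand-in for notify_plugin's action_queue: transaction id -> queued action.
    std::multimap<std::string, std::string> action_queue;
    action_queue.emplace("tx1", "eosio.token::transfer received by b1");
    action_queue.emplace("tx1", "eosio.token::transfer received by b11");
    action_queue.emplace("tx2", "eosio::delegatebw received by eosio");

    // build_message() gathers every action belonging to one transaction
    // via equal_range before posting the message to notify-receive-url.
    auto range = action_queue.equal_range("tx1");
    for (auto it = range.first; it != range.second; ++it)
        std::cout << it->second << "\n";   // prints both tx1 actions, skips tx2
}
```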
diff --git a/README_CN.md b/README_CN.md
index 83d97b25226..d4701484fb9 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -1,7 +1,7 @@
 # BOSCore - 更可用的链,为DApp而生。
 
-## BOSCore Version: v1.0.1
+## BOSCore Version: v1.0.2
-### Basic EOSIO Version: v1.4.3
+### Basic EOSIO Version: v1.4.4
 
 # 背景
 EOS的出现给区块链带来了新的想象力,主网启动短短几个月以来,版本经历了几十次升级,不仅稳定性得到了很大提高,并且新功能也逐步实现,各个节点团队也积极参与建设EOSIO生态。让人更加兴奋的是,EOS已经吸引了越来越多的开发团队,当前已经有数百个DApp在EOS主网上面运行,其交易量和流通市值远超以太坊,可发展的空间愈来愈广阔。

From 37f7e6aa2cf15070f25c7b5de7e240dd21d2eca4 Mon Sep 17 00:00:00 2001
From: thaipanda <45444502+Thaipanda@users.noreply.github.com>
Date: Thu, 13 Dec 2018 20:50:13 +0800
Subject: [PATCH 07/21] merge v1.0.2 (#12)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* boscore basic improvement (#2)
* kafka_plugin code
* Automatic installation librdkafka/cppkafka
* Feature/ci
* Feature/48 kafka plugin
* add CMakeModules/FindCppkafka.cmake
* Production of block in time zone sequence
* P2p self discovery
* P2p self discovery
* add notify_plugin
* add api "get_block_detail"
* add free res limit and blklst code
* update free res limit and blklst code
* update res code
* update unittest code
* revert submodule version
* code typo
* update blklist code
* update sync name list db object error code
* update code
* update index code
* Feature/5 ramdom
* Revert "Merge branch 'feature/5-ramdom' into 'develop'"

This reverts merge request !8
* adjust for setup BOSCore
* change description
* adjust the kafka plugin dependency be more special
* use boscore repository to improve security
* change version tag
* finish for docker/builder
* pass to build docker and update readme
* add actionseed, global action sequence (#5)
* delete renamed old file
* BOSCore v1.0.1-1.4.3
* restructure the version schema
* fix __gmpn_set_str error when build bos.contract
* prepare for the v1.0.1
* finish BOS basic functions
* add README files
* update info
* Release/1.0.x (#11)
* boscore basic improvement (#2)
* kafka_plugin code
* Automatic installation librdkafka/cppkafka
* Feature/ci
* Feature/48 kafka plugin
* add CMakeModules/FindCppkafka.cmake
* Production of block in time zone sequence
* P2p self discovery
* P2p self discovery
* add notify_plugin
* add api "get_block_detail"
* add free res limit and blklst code
* update free res limit and blklst code
* update res code
* update unittest code
* revert submodule version
* code typo
* update blklist code
* update sync name list db object error code
* update code
* update index code
* Feature/5 ramdom
* Revert "Merge branch 'feature/5-ramdom' into 'develop'"

This reverts merge request !8
* adjust for setup BOSCore
* change description
* adjust the kafka plugin dependency be more special
* use boscore repository to improve security
* change version tag
* finish for docker/builder
* pass to build docker and update readme
* add actionseed, global action sequence (#5)
* delete renamed old file
* BOSCore v1.0.1-1.4.3
* restructure the version schema
* fix __gmpn_set_str error when build bos.contract
* prepare for the v1.0.1
* add README files
* update info
* readme for kafka & add time for action (#5)
* Restart node: blacklist no longer takes effect, fixes #7 (#8)
* restart sync list db
* recovery system account bos to eosio
* recovery system account bos to eosio
* recovery system account bos to eosio
* Fix/#3 notify plugin (#10)
* Add debug info
* comment log
* rm log for notify_plugin
* prepare for v1.0.2
---
 .gitlab-ci.yml                                |   2 +-
 CMakeLists.txt                                |   6 +-
 CMakeModules/EosioTester.cmake.in             |   2 +-
 CMakeModules/FindCppkafka.cmake               |  29 ++
Docker/Dockerfile | 14 +- Docker/README.md | 38 +- Docker/builder/Dockerfile | 19 +- Docker/config.ini | 5 + Docker/dev/Dockerfile | 4 +- README.md | 108 ++---- README_CN.md | 36 ++ contracts/eosio.system/delegate_bandwidth.cpp | 2 +- contracts/eosio.system/eosio.system.hpp | 1 - contracts/eosio.system/voting.cpp | 11 +- contracts/eosiolib/transaction.h | 22 ++ eosio_build.sh | 35 +- eosio_install.sh | 22 +- libraries/chain/apply_context.cpp | 4 + libraries/chain/block_header.cpp | 10 + libraries/chain/block_header_state.cpp | 4 +- libraries/chain/chain_config.cpp | 6 + libraries/chain/controller.cpp | 157 +++++++- libraries/chain/fork_database.cpp | 2 - .../include/eosio/chain/apply_context.hpp | 1 + libraries/chain/include/eosio/chain/block.hpp | 3 + .../include/eosio/chain/block_header.hpp | 9 +- .../include/eosio/chain/chain_config.hpp | 21 + .../chain/include/eosio/chain/config.hpp | 1 + .../chain/include/eosio/chain/config_xos.hpp | 23 ++ .../chain/include/eosio/chain/controller.hpp | 28 +- .../eosio/chain/global_property_object.hpp | 24 ++ .../include/eosio/chain/resource_limits.hpp | 22 +- .../eosio/chain/resource_limits_private.hpp | 25 +- libraries/chain/include/eosio/chain/types.hpp | 2 + libraries/chain/resource_limits.cpp | 48 ++- libraries/chain/wasm_interface.cpp | 125 +++++- .../testing/include/eosio/testing/tester.hpp | 2 +- plugins/CMakeLists.txt | 2 + .../history_api_plugin/history_api_plugin.cpp | 4 +- plugins/history_plugin/history_plugin.cpp | 93 ++++- .../eosio/history_plugin/history_plugin.hpp | 20 +- plugins/kafka_plugin/CMakeLists.txt | 10 + plugins/kafka_plugin/fifo.h | 85 +++++ plugins/kafka_plugin/kafka.cpp | 186 +++++++++ plugins/kafka_plugin/kafka.hpp | 46 +++ plugins/kafka_plugin/kafka_plugin.cpp | 166 ++++++++ plugins/kafka_plugin/kafka_plugin.hpp | 41 ++ plugins/kafka_plugin/readme.md | 31 ++ plugins/kafka_plugin/try_handle.cpp | 17 + plugins/kafka_plugin/try_handle.hpp | 9 + plugins/kafka_plugin/types.hpp | 94 +++++ .../include/eosio/net_plugin/protocol.hpp | 15 +- plugins/net_plugin/net_plugin.cpp | 135 ++++++- plugins/notify_plugin/CMakeLists.txt | 7 + plugins/notify_plugin/README.md | 73 ++++ .../eosio/notify_plugin/http_async_client.hpp | 104 +++++ .../eosio/notify_plugin/notify_plugin.hpp | 33 ++ plugins/notify_plugin/notify_plugin.cpp | 359 ++++++++++++++++++ plugins/producer_plugin/producer_plugin.cpp | 7 +- programs/cleos/httpc.hpp | 1 + programs/cleos/main.cpp | 20 +- programs/nodeos/CMakeLists.txt | 4 + scripts/eosio_build_amazon.sh | 120 ++++++ scripts/eosio_build_centos.sh | 120 ++++++ scripts/eosio_build_darwin.sh | 120 ++++++ scripts/eosio_build_fedora.sh | 120 ++++++ scripts/eosio_build_ubuntu.sh | 120 ++++++ unittests/actiondemo/actiondemo.abi | 99 +++++ unittests/actiondemo/actiondemo.cpp | 106 ++++++ unittests/actiondemo/actiondemo.hpp | 50 +++ unittests/actiondemo/test.py | 223 +++++++++++ unittests/database_gmr_blklst_tests.cpp | 309 +++++++++++++++ unittests/database_tests.cpp | 3 +- unittests/gmr_test.cpp | 234 ++++++++++++ 74 files changed, 3870 insertions(+), 189 deletions(-) create mode 100644 CMakeModules/FindCppkafka.cmake create mode 100644 README_CN.md create mode 100644 libraries/chain/include/eosio/chain/config_xos.hpp create mode 100644 plugins/kafka_plugin/CMakeLists.txt create mode 100644 plugins/kafka_plugin/fifo.h create mode 100644 plugins/kafka_plugin/kafka.cpp create mode 100644 plugins/kafka_plugin/kafka.hpp create mode 100644 plugins/kafka_plugin/kafka_plugin.cpp create mode 100644 
plugins/kafka_plugin/kafka_plugin.hpp create mode 100644 plugins/kafka_plugin/readme.md create mode 100644 plugins/kafka_plugin/try_handle.cpp create mode 100644 plugins/kafka_plugin/try_handle.hpp create mode 100644 plugins/kafka_plugin/types.hpp create mode 100644 plugins/notify_plugin/CMakeLists.txt create mode 100644 plugins/notify_plugin/README.md create mode 100644 plugins/notify_plugin/include/eosio/notify_plugin/http_async_client.hpp create mode 100644 plugins/notify_plugin/include/eosio/notify_plugin/notify_plugin.hpp create mode 100644 plugins/notify_plugin/notify_plugin.cpp create mode 100644 unittests/actiondemo/actiondemo.abi create mode 100644 unittests/actiondemo/actiondemo.cpp create mode 100644 unittests/actiondemo/actiondemo.hpp create mode 100644 unittests/actiondemo/test.py create mode 100644 unittests/database_gmr_blklst_tests.cpp create mode 100644 unittests/gmr_test.cpp diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a51b6e9564e..622a46f2642 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -4,4 +4,4 @@ build: script: - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY - docker build -t $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA -f Docker/Dockerfile . - - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA + - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index 360d6c973ab..1451955870c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -28,14 +28,14 @@ include( SetupTargetMacros ) include( InstallDirectoryPermissions ) include( MASSigning ) -set( BLOCKCHAIN_NAME "EOSIO" ) +set( BLOCKCHAIN_NAME "BOS" ) set( CMAKE_CXX_STANDARD 14 ) set( CMAKE_CXX_EXTENSIONS ON ) set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) -set(VERSION_MINOR 4) -set(VERSION_PATCH 4) +set(VERSION_MINOR 0) +set(VERSION_PATCH 2) set( CLI_CLIENT_EXECUTABLE_NAME cleos ) set( NODE_EXECUTABLE_NAME nodeos ) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index 741ee5f0e84..ac2bc0221fa 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -97,8 +97,8 @@ macro(add_eosio_test test_name) ${liblogging} ${libchainbase} ${libbuiltins} - ${GMP_LIBRARIES} ${libsecp256k1} + ${GMP_LIBRARIES} LLVMX86Disassembler LLVMX86AsmParser diff --git a/CMakeModules/FindCppkafka.cmake b/CMakeModules/FindCppkafka.cmake new file mode 100644 index 00000000000..aa8bc82ba85 --- /dev/null +++ b/CMakeModules/FindCppkafka.cmake @@ -0,0 +1,29 @@ +# Override default CMAKE_FIND_LIBRARY_SUFFIXES +if (CPPKAFKA_SHARED_LIB) + set(CPPKAFKA_SUFFIX so) +else() + set(CPPKAFKA_SUFFIX a) +endif() +message(STATUS "Cppkafka finding .${CPPKAFKA_SUFFIX} library") + +FIND_PATH( + CPPKAFKA_INCLUDE_DIR cppkafka.h + PATH "/usr/local" + PATH_SUFFIXES "" "cppkafka") +MARK_AS_ADVANCED(CPPKAFKA_INCLUDE_DIR) + +SET(CPPKAFKA_INCLUDE_DIR ${CPPKAFKA_INCLUDE_DIR}) + +FIND_LIBRARY( + CPPKAFKA_LIBRARY + NAMES cppkafka.${CPPKAFKA_SUFFIX} libcppkafka.${CPPKAFKA_SUFFIX} + HINTS ${CPPKAFKA_INCLUDE_DIR}/.. 
+ PATH_SUFFIXES lib${LIB_SUFFIX}) +MARK_AS_ADVANCED(CPPKAFKA_LIBRARY) + +SET(CPPKAFKA_LIBRARY ${CPPKAFKA_LIBRARY}) +message(STATUS "Cppkafka found ${CPPKAFKA_LIBRARY}") + +include(FindPackageHandleStandardArgs) +SET(_CPPKAFKA_REQUIRED_VARS CPPKAFKA_INCLUDE_DIR CPPKAFKA_LIBRARY) +FIND_PACKAGE_HANDLE_STANDARD_ARGS(Cppkafka DEFAULT_MSG ${_CPPKAFKA_REQUIRED_VARS}) diff --git a/Docker/Dockerfile b/Docker/Dockerfile index 24dd447ed75..67f7714c894 100644 --- a/Docker/Dockerfile +++ b/Docker/Dockerfile @@ -1,11 +1,15 @@ -FROM eosio/builder as builder +FROM boscore/builder as builder ARG branch=master ARG symbol=SYS -RUN git clone -b $branch https://github.com/EOSIO/eos.git --recursive \ - && cd eos && echo "$branch:$(git rev-parse HEAD)" > /etc/eosio-version \ +ENV OPENSSL_ROOT_DIR /usr/include/openssl + + +RUN git clone -b $branch https://github.com/boscore/bos.git --recursive \ + && cd bos && echo "$branch:$(git rev-parse HEAD)" > /etc/eosio-version \ && cmake -H. -B"/tmp/build" -GNinja -DCMAKE_BUILD_TYPE=Release -DWASM_ROOT=/opt/wasm -DCMAKE_CXX_COMPILER=clang++ \ -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/tmp/build -DBUILD_MONGO_DB_PLUGIN=true -DCORE_SYMBOL_NAME=$symbol \ + -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" \ && cmake --build /tmp/build --target install && rm /tmp/build/bin/eosiocpp @@ -15,9 +19,9 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install openssl COPY --from=builder /usr/local/lib/* /usr/local/lib/ COPY --from=builder /tmp/build/bin /opt/eosio/bin COPY --from=builder /tmp/build/contracts /contracts -COPY --from=builder /eos/Docker/config.ini / +COPY --from=builder /bos/Docker/config.ini / COPY --from=builder /etc/eosio-version /etc -COPY --from=builder /eos/Docker/nodeosd.sh /opt/eosio/bin/nodeosd.sh +COPY --from=builder /bos/Docker/nodeosd.sh /opt/eosio/bin/nodeosd.sh ENV EOSIO_ROOT=/opt/eosio RUN chmod +x /opt/eosio/bin/nodeosd.sh ENV LD_LIBRARY_PATH /usr/local/lib diff --git a/Docker/README.md b/Docker/README.md index db0340e3116..9ff75404b7d 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -1,6 +1,6 @@ # Run in docker -Simple and fast setup of EOS.IO on Docker is also available. +Simple and fast setup of BOSCore on Docker is also available. ## Install Dependencies @@ -12,30 +12,30 @@ Simple and fast setup of EOS.IO on Docker is also available. - At least 7GB RAM (Docker -> Preferences -> Advanced -> Memory -> 7GB or above) - If the build below fails, make sure you've adjusted Docker Memory settings and try again. -## Build eos image +## Build BOSCore image ```bash -git clone https://github.com/EOSIO/eos.git --recursive --depth 1 -cd eos/Docker -docker build . -t eosio/eos +git clone https://github.com/boscore/bos.git --recursive --depth 1 +cd bos/Docker +docker build . -t boscore/bos ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.4.4 tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.0.2 tag, you could do the following: ```bash -docker build -t eosio/eos:v1.4.4 --build-arg branch=v1.4.4 . +docker build -t boscore/bos:v1.0.2 --build-arg branch=v1.0.2 . ``` By default, the symbol in eosio.system is set to SYS. 
You can override this using the symbol argument while building the docker image. ```bash -docker build -t eosio/eos --build-arg symbol= . +docker build -t boscore/bos --build-arg symbol= . ``` ## Start nodeos docker container only ```bash -docker run --name nodeos -p 8888:8888 -p 9876:9876 -t eosio/eos nodeosd.sh -e --http-alias=nodeos:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 arg1 arg2 +docker run --name nodeos -p 8888:8888 -p 9876:9876 -t boscore/bos nodeosd.sh -e --http-alias=nodeos:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 arg1 arg2 ``` By default, all data is persisted in a docker volume. It can be deleted if the data is outdated or corrupted: @@ -49,7 +49,7 @@ $ docker volume rm fdc265730a4f697346fa8b078c176e315b959e79365fc9cbd11f090ea0cb5 Alternately, you can directly mount host directory into the container ```bash -docker run --name nodeos -v /path-to-data-dir:/opt/eosio/bin/data-dir -p 8888:8888 -p 9876:9876 -t eosio/eos nodeosd.sh -e --http-alias=nodeos:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 arg1 arg2 +docker run --name nodeos -v /path-to-data-dir:/opt/eosio/bin/data-dir -p 8888:8888 -p 9876:9876 -t boscore/bos nodeosd.sh -e --http-alias=nodeos:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 arg1 arg2 ``` ## Get chain info @@ -92,13 +92,13 @@ docker-compose stop keosd ### Develop/Build custom contracts -Due to the fact that the eosio/eos image does not contain the required dependencies for contract development (this is by design, to keep the image size small), you will need to utilize the eosio/eos-dev image. This image contains both the required binaries and dependencies to build contracts using eosiocpp. +Due to the fact that the boscore/bos image does not contain the required dependencies for contract development (this is by design, to keep the image size small), you will need to utilize the boscore/bos-dev image. This image contains both the required binaries and dependencies to build contracts using eosiocpp. -You can either use the image available on [Docker Hub](https://hub.docker.com/r/eosio/eos-dev/) or navigate into the dev folder and build the image manually. +You can either use the image available on [Docker Hub](https://hub.docker.com/r/boscore/bos-dev/) or navigate into the dev folder and build the image manually. ```bash cd dev -docker build -t eosio/eos-dev . +docker build -t boscore/bos-dev . ``` ### Change default configuration @@ -133,7 +133,7 @@ docker volume rm keosd-data-volume ### Docker Hub -Docker Hub image available from [docker hub](https://hub.docker.com/r/eosio/eos/). +Docker Hub image available from [docker hub](https://hub.docker.com/r/boscore/bos/). 
Create a new `docker-compose.yaml` file with the content below ```bash @@ -141,7 +141,7 @@ version: "3" services: nodeosd: - image: eosio/eos:latest + image: boscore/bos:latest command: /opt/eosio/bin/nodeosd.sh --data-dir /opt/eosio/bin/data-dir -e --http-alias=nodeosd:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 hostname: nodeosd ports: @@ -153,7 +153,7 @@ services: - nodeos-data-volume:/opt/eosio/bin/data-dir keosd: - image: eosio/eos:latest + image: boscore/bos:latest command: /opt/eosio/bin/keosd --wallet-dir /opt/eosio/bin/data-dir --http-server-address=127.0.0.1:8900 --http-alias=localhost:8900 --http-alias=keosd:8900 hostname: keosd links: @@ -169,13 +169,13 @@ volumes: *NOTE:* the default version is the latest, you can change it to what you want -run `docker pull eosio/eos:latest` +run `docker pull boscore/bos:latest` run `docker-compose up` -### EOSIO Testnet +### BOSCore Testnet -We can easily set up a EOSIO local testnet using docker images. Just run the following commands: +We can easily set up a BOSCore local testnet using docker images. Just run the following commands: Note: if you want to use the mongo db plugin, you have to enable it in your `data-dir/config.ini` first. diff --git a/Docker/builder/Dockerfile b/Docker/builder/Dockerfile index cac09937cd0..74677b701c4 100644 --- a/Docker/builder/Dockerfile +++ b/Docker/builder/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 -LABEL author="xiaobo " maintainer="Xiaobo Huang-Ming Huang " version="0.1.1" \ - description="This is a base image for building eosio/eos" +LABEL author="xiaobo " maintainer="Xiaobo Huang-Ming Huang Winlin " version="0.1.2" \ + description="This is a base image for building boscore/bos" RUN echo 'APT::Install-Recommends 0;' >> /etc/apt/apt.conf.d/01norecommends \ && echo 'APT::Install-Suggests 0;' >> /etc/apt/apt.conf.d/01norecommends \ @@ -56,3 +56,18 @@ RUN git clone --depth 1 -b releases/v3.3 https://github.com/mongodb/mongo-cxx-dr && make -j$(nproc) \ && make install \ && cd ../../ && rm -rf mongo-cxx-driver + +RUN git clone --depth 1 -b v0.11.6 https://github.com/boscore/librdkafka.git \ + && cd librdkafka/ \ + && cmake -H. -B_cmake_build \ + && cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build \ + && cd _cmake_build \ + && make install \ + && cd ../../ && rm -rf librdkafka + +RUN git clone --depth 1 -b 0.2 https://github.com/boscore/cppkafka.git \ + && cd cppkafka/ \ + && mkdir build && cd build \ + && cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. \ + && make install \ + && cd ../../ && rm -rf cppkafka diff --git a/Docker/config.ini b/Docker/config.ini index d9871858f19..71ae5c6c0ed 100644 --- a/Docker/config.ini +++ b/Docker/config.ini @@ -77,6 +77,9 @@ access-control-allow-credentials = false # The actual host:port used to listen for incoming p2p connections. (eosio::net_plugin) p2p-listen-endpoint = 0.0.0.0:9876 +#The p2p-discoverable is used to enable or disable p2p network self-discovery (eosio::net_plugin) +#p2p-discoverable= + # An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint. 
(eosio::net_plugin) # p2p-server-address = @@ -158,3 +161,5 @@ txn-reference-block-lag = 0 # plugin = plugin = eosio::chain_api_plugin plugin = eosio::history_api_plugin +# enable this option to produce blocks +#plugin = eosio::producer_plugin diff --git a/Docker/dev/Dockerfile b/Docker/dev/Dockerfile index f2dea74ac6c..cd79c5e0e2d 100644 --- a/Docker/dev/Dockerfile +++ b/Docker/dev/Dockerfile @@ -1,8 +1,8 @@ -FROM eosio/builder +FROM boscore/builder ARG branch=master ARG symbol=SYS -RUN git clone -b $branch https://github.com/EOSIO/eos.git --recursive \ +RUN git clone -b $branch https://github.com/boscore/bos.git --recursive \ && cd eos && echo "$branch:$(git rev-parse HEAD)" > /etc/eosio-version \ && cmake -H. -B"/opt/eosio" -GNinja -DCMAKE_BUILD_TYPE=Release -DWASM_ROOT=/opt/wasm -DCMAKE_CXX_COMPILER=clang++ \ -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/opt/eosio -DBUILD_MONGO_DB_PLUGIN=true -DCORE_SYMBOL_NAME=$symbol \ diff --git a/README.md b/README.md index 2a048d5b973..43fcdfa2d61 100644 --- a/README.md +++ b/README.md @@ -1,95 +1,37 @@ +# BOSCore - Born for DApp, be more useable. -# EOSIO - The Most Powerful Infrastructure for Decentralized Applications +## BOSCore Version: v1.0.2 +### Basic EOSIO Version: v1.4.4 -[![Build status](https://badge.buildkite.com/370fe5c79410f7d695e4e34c500b4e86e3ac021c6b1f739e20.svg?branch=master)](https://buildkite.com/EOSIO/eosio) +# Background +The emergence of EOS has brought new imagination to the blockchain. In just a few months since the main network was launched, the version has undergone dozens of upgrades, not only the stability has been greatly improved, but also the new functions have been gradually realized. The node team is also actively involved in building the EOSIO ecosystem. What is even more exciting is that EOS has attracted more and more development teams. There are already hundreds of DApp running on the EOS main network. The transaction volume and circulation market value far exceed Ethereum, and the space for development is growing broader. +During the gradual development of the EOS main network, we found some deviations from expectations. As the most competitive third-generation public chain, we look forward to seeing more and more applications running on EOS. Developers will use EOS as their preferred platform for application development. But due to the limitations of the current EOS resource model, higher cost of use including creating more accounts for users and deploying operating DApp. The key technology IBC needed for the millions of TPS to be realized in the white paper has not been promoted. The main network has repeatedly experienced insufficient CPU computing resources, which has intensified the urgency of the demand for cross-chain communication. In addition, due to the Pipeline-DPOS consensus algorithm adopted by EOSIO, a transaction takes nearly three minutes to ensure that it cannot be changed. Although it is much better than Bitcoin and Ethereum, it also brings restrictions to a lot of EOS application scenarios. Fast payment can only focus on small transfers, large transfers must wait long enough to ensure that they cannot be changed, which limits the payment experience of users on the chain and under the chain. +In addition to the above mentioned, there are many other improvements that have been actively discussed in our community. From this, we feel that we should try more on EOS and let more developers or teams participate in the construction of EOSIO ecosystem. 
we will together make efforts for the blockchain to land in different scenarios in different industries. As a fully community-maintained EOS side chain, BOS will make more attempts based on its inherited good functions and will feed back to the EOSIO ecosystem its proven new features and functions. -Welcome to the EOSIO source code repository! This software enables businesses to rapidly build and deploy high-performance and high-security blockchain-based applications. +# Overview +BOS is committed to providing users with easy-to-access and easy-to-use blockchain services, providing a more user-friendly infrastructure for DApp operations, working to support richer application scenarios, and actively experimenting with DApp booms. In addition to technical improvements, BOS will also try other aspects. For example, in order to increase the participation of users in voting, estimator technology can be used to motivate accounts that meet clear rules. The rewards of BP on BOS will be adjusted according to the number of DApp on the chain, TPS, market value, liquidity and other indicators. Each BP is an encouragement for providing more resources for the ecology. A resolution reached by a community referendum will be coded as much as possible, to reduce human factors in the process, keep the process on chain, and maintain fairness and transparency. +The codes of the BOS chain are fully contributed and maintained by the community. Each ecological participant can submit codes or suggestions. The related process will refer to existing open source software, such as PEP (Python Enhancement Proposals). +In order to encourage the development of DApp in BOS, the BOS Foundation will provide Token replacement of low-cost resource mortgage services for DApp in BOS, reduce the operating costs of DApp in the early stage; in addition, it will also regularly provide BOS incentives to developers who contribute on a regular basis in order to establish a mutually reinforcing community development trend. -Some of the groundbreaking features of EOSIO include: +# Developer Rewards -1. Free Rate Limited Transactions -1. Low Latency Block confirmation (0.5 seconds) -1. Low-overhead Byzantine Fault Tolerant Finality -1. Designed for optional high-overhead, low-latency BFT finality -1. Smart contract platform powered by Web Assembly -1. Designed for Sparse Header Light Client Validation -1. Scheduled Recurring Transactions -1. Time Delay Security -1. Hierarchical Role Based Permissions -1. Support for Biometric Hardware Secured Keys (e.g. Apple Secure Enclave) -1. Designed for Parallel Execution of Context Free Validation Logic -1. Designed for Inter Blockchain Communication +An additional 0.8% issuance will be given to the BOS eco-contribution code developer every year. Fifty candidates will be nominated by the community. Top 50 BPs vote 40 winners to get the awards: the top 10 share 40%, people ranked 11 to 20 share 30%, the last 20 share the remaining 30% evenly. The reward happens once every 3 months and each reward will be carried out with a one-week publicity. It will be re-evaluated if there is a reasonable objection. And each reward list will be recorded on chain. +As BOS continues to develop, developer rewards will be appropriately adjusted to allow the community to provide more momentum for the evolution of BOS. -EOSIO is released under the open source MIT license and is offered “AS IS” without warranty of any kind, express or implied. 
Any security provided by the EOSIO software depends in part on how it is used, configured, and deployed. EOSIO is built upon many third-party libraries such as Binaryen (Apache License) and WAVM (BSD 3-clause) which are also provided "AS IS" without warranty of any kind. Without limiting the generality of the foregoing, Block.one makes no representation or guarantee that EOSIO or any third-party libraries will perform as intended or will be free of errors, bugs or faulty code. Both may fail in large or small ways that could completely or partially limit functionality or compromise computer systems. If you use or implement EOSIO, you do so at your own risk. In no event will Block.one be liable to any party for any damages whatsoever, even if it had been advised of the possibility of damage.
+## Links
+1. [Website](https://boscore.io)
+2. [Developer Telegram Group](https://t.me/BOSCoreDev)
+3. [WhitePaper](https://github.com/boscore/Documentation/blob/master/BOSCoreTechnicalWhitePaper.md)
+4. [白皮书](https://github.com/boscore/Documentation/blob/master/zh-CN/BOSCoreTechnicalWhitePaper.md)

-Block.one is neither launching nor operating any initial public blockchains based upon the EOSIO software. This release refers only to version 1.0 of our open source software. We caution those who wish to use blockchains built on EOSIO to carefully vet the companies and organizations launching blockchains based on EOSIO before disclosing any private keys to their derivative software.
+## Start
+1. Build from source: `bash ./eosio_build.sh -s BOS`
+2. Docker style, check [Docker](./Docker/README.md)

-There is no public testnet running currently.
+BOSCore is based on EOSIO, so you can also refer to:

-**If you have previously installed EOSIO, please run the `eosio_uninstall` script (it is in the directory where you cloned EOSIO) before downloading and using the binary releases.**
+[Getting Started](https://developers.eos.io/eosio-nodeos/docs/overview-1)

-#### Mac OS X Brew Install
-```sh
-$ brew tap eosio/eosio
-$ brew install eosio
-```
-#### Mac OS X Brew Uninstall
-```sh
-$ brew remove eosio
-```
-#### Ubuntu 18.04 Debian Package Install
-```sh
-$ wget https://github.com/eosio/eos/releases/download/v1.4.4/eosio_1.4.4-1-ubuntu-18.04_amd64.deb
-$ sudo apt install ./eosio_1.4.4-1-ubuntu-18.04_amd64.deb
-```
-#### Ubuntu 16.04 Debian Package Install
-```sh
-$ wget https://github.com/eosio/eos/releases/download/v1.4.4/eosio_1.4.4-1-ubuntu-16.04_amd64.deb
-$ sudo apt install ./eosio_1.4.4-1-ubuntu-16.04_amd64.deb
-```
-#### Debian Package Uninstall
-```sh
-$ sudo apt remove eosio
-```
-#### Centos RPM Package Install
-```sh
-$ wget https://github.com/eosio/eos/releases/download/v1.4.4/eosio-1.4.4-1.el7.x86_64.rpm
-$ sudo yum install ./eosio-1.4.4-1.el7.x86_64.rpm
-```
-#### Centos RPM Package Uninstall
-```sh
-$ sudo yum remove eosio.cdt
-```
-#### Fedora RPM Package Install
-```sh
-$ wget https://github.com/eosio/eos/releases/download/v1.4.4/eosio-1.4.4-1.fc27.x86_64.rpm
-$ sudo yum install ./eosio-1.4.4-1.fc27.x86_64.rpm
-```
-#### Fedora RPM Package Uninstall
-```sh
-$ sudo yum remove eosio.cdt
-```
+[EOSIO Developer Portal](https://developers.eos.io).

-## Supported Operating Systems
-EOSIO currently supports the following operating systems:
-1. Amazon 2017.09 and higher
-2. Centos 7
-3. Fedora 25 and higher (Fedora 27 recommended)
-4. Mint 18
-5. Ubuntu 16.04 (Ubuntu 16.10 recommended)
-6. Ubuntu 18.04
-7. MacOS Darwin 10.12 and higher (MacOS 10.13.x recommended)

-## Resources
-1. [Website](https://eos.io)
-1. 
[Blog](https://medium.com/eosio) -1. [Developer Portal](https://developers.eos.io) -1. [StackExchange for Q&A](https://eosio.stackexchange.com/) -1. [Community Telegram Group](https://t.me/EOSProject) -1. [Developer Telegram Group](https://t.me/joinchat/EaEnSUPktgfoI-XPfMYtcQ) -1. [White Paper](https://github.com/EOSIO/Documentation/blob/master/TechnicalWhitePaper.md) -1. [Roadmap](https://github.com/EOSIO/Documentation/blob/master/Roadmap.md) - - -## Getting Started -Instructions detailing the process of getting the software, building it, running a simple test network that produces blocks, account creation and uploading a sample contract to the blockchain can be found in [Getting Started](https://developers.eos.io/eosio-nodeos/docs/overview-1) on the [EOSIO Developer Portal](https://developers.eos.io). diff --git a/README_CN.md b/README_CN.md new file mode 100644 index 00000000000..d4701484fb9 --- /dev/null +++ b/README_CN.md @@ -0,0 +1,36 @@ +# BOSCore - 更可用的链,为DApp而生。 + +## BOSCore Version: v1.0.2 +### Basic EOSIO Version: v1.4.4 + +# 背景 +EOS的出现给区块链带来了新的想象力,主网启动短短几个月以来,版本经历了几十次升级,不仅稳定性得到了很大提高,并且新功能也逐步实现,各个节点团队也积极参与建设EOSIO生态。让人更加兴奋的是,EOS已经吸引了越来越多的开发团队,当前已经有数百个DApp在EOS主网上面运行,其交易量和流通市值远超以太坊,可发展的空间愈来愈广阔。 +在EOS主网逐渐发展的过程中,我们发现了一些偏离期望的地方。作为最有竞争力的第三代公链,大家希望看到的是能够有更多、更丰富的应用能够在EOS上面运行,开发者会将EOS作为自己应用开发的首选平台,但是由于目前EOS的资源模型的限制,导致了很高的使用成本,包括为用户创建更多的账户,以及部署运营DApp需要的较高成本。针对白皮书中要实现的上百万TPS需要的关键技术IBC,一直没有进行推进,主网多次出现CPU计算资源不足的情况,更是加剧了对跨链通讯需求的迫切性。此外,由于EOSIO采用的Pipeline-DPOS共识机制,一个交易需要近三分钟才能保证不可更改,虽然相较比特币、以太坊是有很大的进步,但是这也给EOS的应用场景带来很大限制,快速支付只能聚焦于小额转账,大额转账必须要等待足够长的时间才能保证不可更改,这就限制了链上、链下用户支付体验。 +除了上面提到的情况,还有很多其他改进想法一直在我们社区进行活跃的讨论,由此,我们觉得应该基于EOS进行更多的尝试,让更多的开发者或者团队来参与到EOSIO生态的建设中来,一起为区块链在不同行业不同场景中的落地做出一份努力。BOS作为一条完全由社区维护的EOS侧链,在继承其良好功能的基础上,会进行更多的尝试,并且会将经过验证的新特性、新功能反哺给EOSIO生态。 + +# 概述 +BOS致力于为用户提供方便进入并易于使用的区块链服务,为DApp运营提供更友好的基础设施,为支持更丰富的应用场景努力,为DApp大繁荣进行积极尝试。除了技术改进以外,BOS也会进行其他方面的尝试。比如,为了提高用户投票参与度,可以通过预言机技术来针对符合明确规则的账户进行激励;BOS上面的BP的奖励会根据链上DApp的数量、TPS、市值、流通量等指标进行调整,鼓励每个BP为生态提供更多资源;一项社区公投达成的决议将会尽量被代码化,减少人为的因素在里面,流程上链,保持公正透明。 +BOS链的代码完全由社区贡献并维护,每个生态参与者都可以提交代码或者建议,相关的流程会参考已有开源软件来进行,比如PEP(Python Enhancement Proposals)。 +为鼓励DApp在BOS的发展,BOS基金会将会为其上的DApp提供Token置换的低成本的资源抵押服务,降低DApp前期的运营成本;此外还会定期对做出贡献的开发者提供BOS激励,以便建立起一个相互促进的社区发展趋势。 + +# 开发者激励 +每年增发 0.8% 面向BOS生态贡献代码的开发者,由社区提出50名奖励名单,由前50名BP投票选出40名的获奖者获取对应奖励:前10名获取40%,11到20名获取30%,最后20名均分30%,奖励周期3个月一次,每次奖励名额都会进行为期一周的公示,如果有合理异议,将会重新评审,每次奖励名单都会上链记录。 +随着BOS的不断发展,开发者奖励会适当调整,让社区为BOS的进化提供更多动力。 + + +## 资源 +1. [官网](https://boscore.io) +2. [开发者社群](https://t.me/BOSCoreDev) +3. [WhitePaper](https://github.com/boscore/Documentation/blob/master/BOSCoreTechnicalWhitePaper.md) +4. [白皮书](https://github.com/boscore/Documentation/blob/master/zh-CN/BOSCoreTechnicalWhitePaper.md) + +## 开始 +1. 源码直接编译: `bash ./eosio_build.sh -s BOS` +2. Docker方式部署,参看 [Docker](./Docker/README.md) + +BOSCore是基于EOSIO技术的扩展,所以EOSIO的相关资料也可以参考: + +[EOSIO 开始](https://developers.eos.io/eosio-nodeos/docs/overview-1) + +[EOSIO 开发者门户](https://developers.eos.io). 
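To sanity-check the reward split described in the Developer Rewards section above (in both language versions of the README), here is a small standalone C++ sketch. It is illustrative only: the pool is normalized to 1.0 and every figure comes from the README text itself.

```cpp
#include <cstdio>

// Worked arithmetic for the developer-reward split: 40 winners share the
// 0.8% annual issuance pool P; the top 10 share 40% of P, ranks 11-20 share
// 30%, and the remaining 20 share the last 30% evenly.
int main() {
    const double pool      = 1.0;                // normalized reward pool
    const double tiers[3]  = {0.40, 0.30, 0.30}; // tier shares of the pool
    const int    counts[3] = {10, 10, 20};       // winners per tier
    for (int t = 0; t < 3; ++t) {
        double per_winner = pool * tiers[t] / counts[t];
        std::printf("tier %d: %d winners, %.2f%% of the pool each\n",
                    t + 1, counts[t], per_winner * 100.0);
    }
    // Expected output: 4.00% each for the top 10, 3.00% each for ranks 11-20,
    // 1.50% each for the remaining 20 (totals: 40% + 30% + 30% = 100%).
}
```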
+
diff --git a/contracts/eosio.system/delegate_bandwidth.cpp b/contracts/eosio.system/delegate_bandwidth.cpp
index 1e190e9fe3c..95a40781530 100644
--- a/contracts/eosio.system/delegate_bandwidth.cpp
+++ b/contracts/eosio.system/delegate_bandwidth.cpp
@@ -205,7 +205,7 @@ namespace eosiosystem {
      const int64_t max_claimable = 100'000'000'0000ll;
      const int64_t claimable = int64_t(max_claimable * double(now()-base_time) / (10*seconds_per_year) );

-      eosio_assert( max_claimable - claimable <= stake, "b1 can only claim their tokens over 10 years" );
+      eosio_assert( max_claimable - claimable <= stake, "bosbosbosbos can only claim their tokens over 10 years" );

   void system_contract::changebw( account_name from, account_name receiver,
diff --git a/contracts/eosio.system/eosio.system.hpp b/contracts/eosio.system/eosio.system.hpp
index a33238a1eaa..66964e39659 100644
--- a/contracts/eosio.system/eosio.system.hpp
+++ b/contracts/eosio.system/eosio.system.hpp
@@ -172,7 +172,6 @@ namespace eosiosystem {
         void undelegatebw( account_name from, account_name receiver,
                            asset unstake_net_quantity, asset unstake_cpu_quantity );

-
         /**
          * Increases receiver's ram quota based upon current price and quantity of
          * tokens provided. An inline transfer from receiver to system contract of
diff --git a/contracts/eosio.system/voting.cpp b/contracts/eosio.system/voting.cpp
index 166f1707cd7..feeb53fc3d8 100644
--- a/contracts/eosio.system/voting.cpp
+++ b/contracts/eosio.system/voting.cpp
@@ -86,8 +86,15 @@ namespace eosiosystem {
            return;
         }

-      /// sort by producer name
-      std::sort( top_producers.begin(), top_producers.end() );
+      /// sort by producer location
+      struct {
+         bool operator()( const std::pair<eosio::producer_key,uint16_t>& a, const std::pair<eosio::producer_key,uint16_t>& b ) const {
+            return a.second == b.second ? a.first < b.first : a.second < b.second;
+         }
+      } location_comparator;
+      std::sort( top_producers.begin(), top_producers.end(), location_comparator );

         std::vector<eosio::producer_key> producers;
diff --git a/contracts/eosiolib/transaction.h b/contracts/eosiolib/transaction.h
index dd7c05ded17..db115ca27e1 100644
--- a/contracts/eosiolib/transaction.h
+++ b/contracts/eosiolib/transaction.h
@@ -94,6 +94,28 @@ extern "C" {
    */
   size_t transaction_size();

+   /**
+    * Gets the id of the currently executing transaction.
+    *
+    * @param id : output parameter that receives the transaction id
+    */
+   void get_transaction_id( transaction_id_type* id );
+
+   /**
+    * Gets the globally unique sequence number of the currently executing action.
+    *
+    * @param seq : output parameter that receives the sequence number
+    */
+   void get_action_sequence(uint64_t* seq);
+
+   /**
+    * Gets the producer's signature over the current action's time seed.
+    *
+    * @param sig : memory buffer that receives the packed signature
+    * @param siglen : size of the memory buffer; pass 0 to query the required size
+    * @return : the number of bytes of valid data
+    */
+   int bpsig_action_time_seed( const char* sig, size_t siglen );
+
   /**
    * Gets the block number used for TAPOS on the currently executing transaction.
    *
diff --git a/eosio_build.sh b/eosio_build.sh
index b1988d74f0c..fafdbbaeb8e 100755
--- a/eosio_build.sh
+++ b/eosio_build.sh
@@ -120,8 +120,8 @@
    if [ ! -d "${SOURCE_DIR}/.git" ]; then
       printf "\\n\\tThis build script only works with sources cloned from git\\n"
-      printf "\\tPlease clone a new eos directory with 'git clone https://github.com/EOSIO/eos --recursive'\\n"
-      printf "\\tSee the wiki for instructions: https://github.com/EOSIO/eos/wiki\\n"
+      printf "\\tPlease clone a new bos directory with 'git clone https://github.com/boscore/bos --recursive'\\n"
+      printf "\\tSee the wiki for instructions: https://github.com/boscore/bos/wiki\\n"
       exit 1
    fi
@@ -238,7 +238,7 @@
   . "$FILE"

-   printf "\\n\\n>>>>>>>> ALL dependencies sucessfully found or installed . Installing EOSIO\\n\\n"
+   printf "\\n\\n>>>>>>>> ALL dependencies successfully found or installed. 
Installing BOSCore\\n\\n" printf ">>>>>>>> CMAKE_BUILD_TYPE=%s\\n" "${CMAKE_BUILD_TYPE}" printf ">>>>>>>> ENABLE_COVERAGE_TESTING=%s\\n" "${ENABLE_COVERAGE_TESTING}" printf ">>>>>>>> DOXYGEN=%s\\n\\n" "${DOXYGEN}" @@ -267,41 +267,42 @@ -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ -DCMAKE_INSTALL_PREFIX="/usr/local/eosio" ${LOCAL_CMAKE_FLAGS} "${SOURCE_DIR}" then - printf "\\n\\t>>>>>>>>>>>>>>>>>>>> CMAKE building EOSIO has exited with the above error.\\n\\n" + printf "\\n\\t>>>>>>>>>>>>>>>>>>>> CMAKE building BOSCore has exited with the above error.\\n\\n" exit -1 fi if [ "${START_MAKE}" == "false" ]; then - printf "\\n\\t>>>>>>>>>>>>>>>>>>>> EOSIO has been successfully configured but not yet built.\\n\\n" + printf "\\n\\t>>>>>>>>>>>>>>>>>>>> BOSCore has been successfully configured but not yet built.\\n\\n" exit 0 fi if [ -z ${JOBS} ]; then JOBS=$CPU_CORE; fi # Future proofing: Ensure $JOBS is set (usually set in scripts/eosio_build_*.sh scripts) if ! make -j"${JOBS}" then - printf "\\n\\t>>>>>>>>>>>>>>>>>>>> MAKE building EOSIO has exited with the above error.\\n\\n" + printf "\\n\\t>>>>>>>>>>>>>>>>>>>> MAKE building BOSCore has exited with the above error.\\n\\n" exit -1 fi TIME_END=$(( $(date -u +%s) - ${TIME_BEGIN} )) - printf "\n\n${bldred}\t _______ _______ _______ _________ _______\n" - printf '\t( ____ \( ___ )( ____ \\\\__ __/( ___ )\n' - printf "\t| ( \/| ( ) || ( \/ ) ( | ( ) |\n" - printf "\t| (__ | | | || (_____ | | | | | |\n" - printf "\t| __) | | | |(_____ ) | | | | | |\n" - printf "\t| ( | | | | ) | | | | | | |\n" - printf "\t| (____/\| (___) |/\____) |___) (___| (___) |\n" - printf "\t(_______/(_______)\_______)\_______/(_______)\n${txtrst}" + printf "\n\n${bldred}\t ______ _______ _______ _______ _______ _______ _______ \n" + printf "\t( ___ \ ( ___ )( ____ \( ____ \( ___ )( ____ )( ____ \ \n" + printf "\t| ( ) )| ( ) || ( \/| ( \/| ( ) || ( )|| ( \/\n" + printf "\t| (__/ / | | | || (_____ | | | | | || (____)|| (__ \n" + printf "\t| __ ( | | | |(_____ )| | | | | || __)| __) \n" + printf "\t| ( \ \ | | | | ) || | | | | || (\ ( | ( \n" + printf "\t| )___) )| (___) |/\____) || (____/\| (___) || ) \ \__| (____/\ \n" + printf "\t|/ \___/ (_______)\_______)(_______/(_______)|/ \__/(_______/\n\n${txtrst}" - printf "\\n\\tEOSIO has been successfully built. %02d:%02d:%02d\\n\\n" $(($TIME_END/3600)) $(($TIME_END%3600/60)) $(($TIME_END%60)) + printf "\\n\\tBOSCore has been successfully built. 
%02d:%02d:%02d\\n\\n" $(($TIME_END/3600)) $(($TIME_END%3600/60)) $(($TIME_END%60)) printf "\\tTo verify your installation run the following commands:\\n" print_instructions printf "\\tFor more information:\\n" - printf "\\tEOSIO website: https://eos.io\\n" - printf "\\tEOSIO Telegram channel @ https://t.me/EOSProject\\n" + printf "\\tBOSCore website: https://boscore.io\\n" + printf "\\tBOSCore Telegram channel @ https://t.me/BOSCoreProject\\n" + printf "\\tBOSCore wiki: https://github.com/boscore/bos/wiki\\n" printf "\\tEOSIO resources: https://eos.io/resources/\\n" printf "\\tEOSIO Stack Exchange: https://eosio.stackexchange.com\\n" printf "\\tEOSIO wiki: https://github.com/EOSIO/eos/wiki\\n\\n\\n" diff --git a/eosio_install.sh b/eosio_install.sh index 9ed195df7d0..fcb6e3c81d7 100755 --- a/eosio_install.sh +++ b/eosio_install.sh @@ -103,18 +103,20 @@ fi install_symlinks create_cmake_symlink "eosio-config.cmake" - printf "\n\n${bldred}\t _______ _______ _______ _________ _______\n" - printf '\t( ____ \( ___ )( ____ \\\\__ __/( ___ )\n' - printf "\t| ( \/| ( ) || ( \/ ) ( | ( ) |\n" - printf "\t| (__ | | | || (_____ | | | | | |\n" - printf "\t| __) | | | |(_____ ) | | | | | |\n" - printf "\t| ( | | | | ) | | | | | | |\n" - printf "\t| (____/\| (___) |/\____) |___) (___| (___) |\n" - printf "\t(_______/(_______)\_______)\_______/(_______)\n${txtrst}" + printf "\n\n${bldred}\t ______ _______ _______ _______ _______ _______ _______ \n" + printf "\t( ___ \ ( ___ )( ____ \( ____ \( ___ )( ____ )( ____ \ \n" + printf "\t| ( ) )| ( ) || ( \/| ( \/| ( ) || ( )|| ( \/\n" + printf "\t| (__/ / | | | || (_____ | | | | | || (____)|| (__ \n" + printf "\t| __ ( | | | |(_____ )| | | | | || __)| __) \n" + printf "\t| ( \ \ | | | | ) || | | | | || (\ ( | ( \n" + printf "\t| )___) )| (___) |/\____) || (____/\| (___) || ) \ \__| (____/\ \n" + printf "\t|/ \___/ (_______)\_______)(_______/(_______)|/ \__/(_______/\n\n${txtrst}" printf "\\tFor more information:\\n" - printf "\\tEOSIO website: https://eos.io\\n" - printf "\\tEOSIO Telegram channel @ https://t.me/EOSProject\\n" + printf "\\tBOSCore website: https://boscore.io\\n" + printf "\\tBOSCore Telegram channel @ https://t.me/BOSCoreProject\\n" + printf "\\tBOSCore wiki: https://github.com/boscore/bos/wiki\\n" printf "\\tEOSIO resources: https://eos.io/resources/\\n" printf "\\tEOSIO Stack Exchange: https://eosio.stackexchange.com\\n" printf "\\tEOSIO wiki: https://github.com/EOSIO/eos/wiki\\n\\n\\n" + diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index de1450013d8..07ab384eeff 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -44,6 +44,9 @@ void apply_context::exec_one( action_trace& trace ) trace.act = act; trace.context_free = context_free; + const auto& p = control.get_dynamic_global_properties(); + global_action_sequence = p.global_action_sequence + 1; + const auto& cfg = control.get_global_properties().configuration; try { try { @@ -79,6 +82,7 @@ void apply_context::exec_one( action_trace& trace ) r.global_sequence = next_global_sequence(); r.recv_sequence = next_recv_sequence( receiver ); + global_action_sequence = 0; const auto& account_sequence = db.get(act.account); r.code_sequence = account_sequence.code_sequence; // could be modified by action execution above diff --git a/libraries/chain/block_header.cpp b/libraries/chain/block_header.cpp index 8ba95b705e2..b73338b3ca2 100644 --- a/libraries/chain/block_header.cpp +++ b/libraries/chain/block_header.cpp @@ -28,5 +28,15 @@ 
namespace eosio { namespace chain {
      return result;
   }

+   void block_header::set_block_extensions_mroot(digest_type& mroot)
+   {
+      if (header_extensions.size() < 1)
+         header_extensions.emplace_back();
+
+      header_extensions[0].first = static_cast<uint16_t>(block_header_extensions_type::block_extensions_mroot);
+      header_extensions[0].second.resize(mroot.data_size());
+      std::copy(mroot.data(), mroot.data() + mroot.data_size(), header_extensions[0].second.data());
+   }
+
} }
diff --git a/libraries/chain/block_header_state.cpp b/libraries/chain/block_header_state.cpp
index 6e7b339c42c..2ae15af7341 100644
--- a/libraries/chain/block_header_state.cpp
+++ b/libraries/chain/block_header_state.cpp
@@ -145,7 +145,7 @@ namespace eosio { namespace chain {
   */
  block_header_state block_header_state::next( const signed_block_header& h, bool trust )const {
    EOS_ASSERT( h.timestamp != block_timestamp_type(), block_validate_exception, "", ("h",h) );
-    EOS_ASSERT( h.header_extensions.size() == 0, block_validate_exception, "no supported extensions" );
+    //EOS_ASSERT( h.header_extensions.size() == 0, block_validate_exception, "no supported extensions" );
    EOS_ASSERT( h.timestamp > header.timestamp, block_validate_exception, "block must be later in time" );
    EOS_ASSERT( h.previous == id, unlinkable_block_exception, "block must link to current state" );
@@ -175,8 +175,10 @@ namespace eosio { namespace chain {
    result.header.action_mroot       = h.action_mroot;
    result.header.transaction_mroot  = h.transaction_mroot;
    result.header.producer_signature = h.producer_signature;
+   result.header.header_extensions  = h.header_extensions;
    result.id = result.header.id();

+   // ASSUMPTION FROM controller_impl::apply_block = all untrusted blocks will have their signatures pre-validated here
    if( !trust ) {
       EOS_ASSERT( result.block_signing_key == result.signee(), wrong_signing_key, "block not signed by expected key",
diff --git a/libraries/chain/chain_config.cpp b/libraries/chain/chain_config.cpp
index 974675749fb..efb66bba95f 100644
--- a/libraries/chain/chain_config.cpp
+++ b/libraries/chain/chain_config.cpp
@@ -43,4 +43,10 @@ namespace eosio { namespace chain {
              "max authority depth should be at least 1" );
 }

+void chain_config2::validate() const {
+   EOS_ASSERT(std::numeric_limits<uint16_t>::max() > actor_blacklist.size(), action_validate_exception, "Overflow in blacklist when adding actor blacklist!");
+   EOS_ASSERT(std::numeric_limits<uint16_t>::max() > contract_blacklist.size(), action_validate_exception, "Overflow in blacklist when adding contract blacklist!");
+   EOS_ASSERT(std::numeric_limits<uint16_t>::max() > resource_greylist.size(), action_validate_exception, "Overflow in greylist when adding resource greylist!");
+}
+
 } } // namespace eosio::chain
diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index 41e9d551728..cb824cf45ba 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -33,6 +33,7 @@
using controller_index_set = index_set<
   account_index,
   account_sequence_index,
   global_property_multi_index,
+  global_property2_multi_index,
   dynamic_global_property_multi_index,
   block_summary_multi_index,
   transaction_multi_index,
@@ -108,6 +109,8 @@ struct pending_state {
   optional<block_id_type>                    _producer_block_id;
+  std::function<signature_type(const digest_type&)> _signer;
+
   void push() {
      _db_session.push();
   }
@@ -391,6 +394,11 @@ struct controller_impl {
         ilog( "database initialized with hash: ${hash}", ("hash", hash) );
      }

+      //*bos begin*
+      sync_name_list(list_type::actor_blacklist_type,true);
+      sync_name_list(list_type::contract_blacklist_type,true);
+      sync_name_list(list_type::resource_greylist_type,true);
+      //*bos end*
   }

   ~controller_impl() {
@@ -637,6 +645,17 @@ struct controller_impl {
      });
      db.create<dynamic_global_property_object>([](auto&){});

+      // *bos begin*
+      // guaranteed minimum resources, abbreviated "gmr"
+      db.create<global_property2_object>([&](auto &gpo) {
+        gpo.gmr.cpu_us   = config::default_gmr_cpu_limit;
+        gpo.gmr.net_byte = config::default_gmr_net_limit;
+        gpo.gmr.ram_byte = config::default_gmr_ram_limit;
+      });
+      // *bos end*
+
      authorization.initialize_database();
      resource_limits.initialize_database();
@@ -662,7 +681,73 @@
                             conf.genesis.initial_timestamp );
   }

+   // *bos begin*
+   void set_name_list(list_type list, list_action_type action, std::vector<account_name> name_list)
+   {
+      int64_t lst = static_cast<int64_t>(list);
+
+      EOS_ASSERT(list >= list_type::actor_blacklist_type && list < list_type::list_type_count, transaction_exception,
+                 "unknown list type: ${l}, action: ${n}", ("l", static_cast<int64_t>(list))("n", static_cast<int64_t>(action)));
+      vector<flat_set<account_name>*> lists = {&conf.actor_blacklist, &conf.contract_blacklist, &conf.resource_greylist};
+      EOS_ASSERT(lists.size() == static_cast<int64_t>(list_type::list_type_count) - 1, transaction_exception,
+                 "list size is wrong: ${l}, action: ${n}", ("l", static_cast<int64_t>(list))("n", static_cast<int64_t>(action)));
+      flat_set<account_name>& lo = *lists[lst - 1];
+
+      if (action == list_action_type::insert_type)
+      {
+         lo.insert(name_list.begin(), name_list.end());
+      }
+      else if (action == list_action_type::remove_type)
+      {
+         flat_set<account_name> name_set(name_list.begin(), name_list.end());
+
+         flat_set<account_name> results;
+         results.reserve(lo.size());
+         std::set_difference(lo.begin(), lo.end(),
+                             name_set.begin(), name_set.end(),
+                             std::inserter(results, results.begin()));
+
+         lo = results;
+      }
+
+      sync_name_list(list);
+   }
+
+   void sync_list_and_db(list_type list, global_property2_object& gprops2, bool isMerge = false)
+   {
+      int64_t lst = static_cast<int64_t>(list);
+      EOS_ASSERT(list >= list_type::actor_blacklist_type && list < list_type::list_type_count, transaction_exception,
+                 "unknown list type: ${l}, isMerge: ${n}", ("l", static_cast<int64_t>(list))("n", isMerge));
+      vector<shared_vector<account_name>*> lists = {&gprops2.cfg.actor_blacklist, &gprops2.cfg.contract_blacklist, &gprops2.cfg.resource_greylist};
+      vector<flat_set<account_name>*> conflists = {&conf.actor_blacklist, &conf.contract_blacklist, &conf.resource_greylist};
+      EOS_ASSERT(lists.size() == static_cast<int64_t>(list_type::list_type_count) - 1, transaction_exception,
+                 "list size is wrong: ${l}, isMerge: ${n}", ("l", static_cast<int64_t>(list))("n", isMerge));
+      shared_vector<account_name>& lo  = *lists[lst - 1];
+      flat_set<account_name>&      clo = *conflists[lst - 1];
+
+      if (isMerge)
+      {
+         // on initialization, merge and deduplicate the elements of the config list and the db list; the result lands in clo
+         for (auto& a : lo)
+         {
+            clo.insert(a);
+         }
+      }
+
+      // clear the db copy and save the merged result back into the db object
+      lo.clear();
+      for (auto& a : clo)
+      {
+         lo.push_back(a);
+      }
+   }
+
+   void sync_name_list(list_type list, bool isMerge = false)
+   {
+      const auto& gpo2 = db.get<global_property2_object>();
+      db.modify(gpo2, [&](auto& gprops2) {
+         sync_list_and_db(list, gprops2, isMerge);
+      });
+   }
+   // *bos end*
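The three functions above are the whole lifecycle of a list change: `set_name_list` mutates the in-memory config and then `sync_name_list` persists the merged result. A minimal usage sketch follows; it is illustrative only and not part of this patch (it assumes an initialized `eosio::chain::controller` named `chain`, and the account names are hypothetical):

```cpp
// Illustrative only: driving the name-list plumbing above through the public
// controller API. Assumes an initialized eosio::chain::controller `chain`.
#include <eosio/chain/controller.hpp>
#include <vector>

using namespace eosio::chain;

void blacklist_two_actors( controller& chain ) {
   std::vector<account_name> names{ N(badactora), N(badactorb) }; // hypothetical accounts
   chain.set_name_list( static_cast<int64_t>(list_type::actor_blacklist_type), // list 1 = actor blacklist
                        static_cast<int64_t>(list_action_type::insert_type),  // action 1 = insert
                        names );
   // set_name_list() updates conf.actor_blacklist in memory, then calls
   // sync_name_list(), which writes the merged list into the chainbase-backed
   // global_property2_object, so the blacklist survives a node restart.
}
```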
/**
 *  @post regardless of the success of commit block there is no active pending block
 */
@@ -1074,7 +1159,7 @@ struct controller_impl {
   void start_block( block_timestamp_type when, uint16_t confirm_block_count, controller::block_status s,
-                     const optional<block_id_type>& producer_block_id )
+                     const optional<block_id_type>& producer_block_id, std::function<signature_type(const digest_type&)> signer = nullptr )
   {
      EOS_ASSERT( !pending, block_validate_exception, "pending block already exists" );
@@ -1093,6 +1178,7 @@
      pending->_block_status = s;
      pending->_producer_block_id 
= producer_block_id; + pending->_signer = signer; pending->_pending_block_state = std::make_shared( *head, when ); // promotes pending schedule (if any) to active pending->_pending_block_state->in_current_chain = true; @@ -1160,10 +1246,13 @@ struct controller_impl { void apply_block( const signed_block_ptr& b, controller::block_status s ) { try { try { - EOS_ASSERT( b->block_extensions.size() == 0, block_validate_exception, "no supported extensions" ); + //EOS_ASSERT( b->block_extensions.size() == 0, block_validate_exception, "no supported extensions" ); auto producer_block_id = b->id(); start_block( b->timestamp, b->confirmed, s , producer_block_id); + pending->_pending_block_state->block->header_extensions = b->header_extensions; + pending->_pending_block_state->block->block_extensions = b->block_extensions; + transaction_trace_ptr trace; for( const auto& receipt : b->transactions ) { @@ -1228,7 +1317,6 @@ struct controller_impl { void push_block( const signed_block_ptr& b, controller::block_status s ) { EOS_ASSERT(!pending, block_validate_exception, "it is not valid to push a block when there is a pending block"); - auto reset_prod_light_validation = fc::make_scoped_exit([old_value=trusted_producer_light_validation, this]() { trusted_producer_light_validation = old_value; }); @@ -1361,6 +1449,17 @@ struct controller_impl { pending->_pending_block_state->header.transaction_mroot = merkle( move(trx_digests) ); } + void set_ext_merkle() { + vector ext_digests; + const auto& exts = pending->_pending_block_state->block->block_extensions; + ext_digests.reserve( exts.size()); + for( const auto& a : exts ) + ext_digests.emplace_back( digest_type::hash(a) ); + + auto mroot = merkle( move(ext_digests)); + pending->_pending_block_state->header.set_block_extensions_mroot(mroot); + } + void finalize_block() { @@ -1385,16 +1484,24 @@ struct controller_impl { // Update resource limits: resource_limits.process_account_limit_updates(); const auto& chain_config = self.get_global_properties().configuration; + const auto& gmr = self.get_global_properties2().gmr;//guaranteed minimum resources which is abbreviated gmr + uint32_t max_virtual_mult = 1000; uint64_t CPU_TARGET = EOS_PERCENT(chain_config.max_block_cpu_usage, chain_config.target_block_cpu_usage_pct); resource_limits.set_block_parameters( { CPU_TARGET, chain_config.max_block_cpu_usage, config::block_cpu_usage_average_window_ms / config::block_interval_ms, max_virtual_mult, {99, 100}, {1000, 999}}, {EOS_PERCENT(chain_config.max_block_net_usage, chain_config.target_block_net_usage_pct), chain_config.max_block_net_usage, config::block_size_average_window_ms / config::block_interval_ms, max_virtual_mult, {99, 100}, {1000, 999}} ); + + resource_limits.set_gmr_parameters( + { gmr.ram_byte, gmr.cpu_us,gmr.net_byte} + ); + resource_limits.process_block_usage(pending->_pending_block_state->block_num); set_action_merkle(); set_trx_merkle(); + set_ext_merkle(); auto p = pending->_pending_block_state; p->id = p->header.id(); @@ -1605,9 +1712,9 @@ chainbase::database& controller::mutable_db()const { return my->db; } const fork_database& controller::fork_db()const { return my->fork_db; } -void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count) { +void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count, std::function signer) { validate_db_available_size(); - my->start_block(when, confirm_block_count, block_status::incomplete, optional() ); + my->start_block(when, confirm_block_count, 
block_status::incomplete, optional() , signer); } void controller::finalize_block() { @@ -1677,12 +1784,21 @@ void controller::set_actor_whitelist( const flat_set& new_actor_wh } void controller::set_actor_blacklist( const flat_set& new_actor_blacklist ) { my->conf.actor_blacklist = new_actor_blacklist; + + // *bos begin* + my->sync_name_list(list_type::actor_blacklist_type); + // *bos end* } void controller::set_contract_whitelist( const flat_set& new_contract_whitelist ) { my->conf.contract_whitelist = new_contract_whitelist; } void controller::set_contract_blacklist( const flat_set& new_contract_blacklist ) { my->conf.contract_blacklist = new_contract_blacklist; + + // *bos begin* + my->sync_name_list(list_type::contract_blacklist_type); + // *bos end* + } void controller::set_action_blacklist( const flat_set< pair >& new_action_blacklist ) { for (auto& act: new_action_blacklist) { @@ -1744,6 +1860,11 @@ optional controller::pending_producer_block_id()const { return my->pending->_producer_block_id; } +std::function controller::pending_producer_signer()const { + EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); + return my->pending->_signer; +} + uint32_t controller::last_irreversible_block_num() const { return std::max(std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum), my->snapshot_head_block); } @@ -2092,10 +2213,19 @@ void controller::set_subjective_cpu_leeway(fc::microseconds leeway) { void controller::add_resource_greylist(const account_name &name) { my->conf.resource_greylist.insert(name); + + // *bos begin* + my->sync_name_list(list_type::resource_greylist_type); + // *bos end* } void controller::remove_resource_greylist(const account_name &name) { + my->conf.resource_greylist.erase(name); + + // *bos begin* + my->sync_name_list(list_type::resource_greylist_type); + // *bos end* } bool controller::is_resource_greylisted(const account_name &name) const { @@ -2106,4 +2236,21 @@ const flat_set &controller::get_resource_greylist() const { return my->conf.resource_greylist; } +// *bos begin* +const global_property2_object& controller::get_global_properties2()const { + return my->db.get(); +} + +void controller::set_name_list(int64_t list, int64_t action, std::vector name_list) +{ + //redundant sync + my->sync_name_list(list_type::actor_blacklist_type, true); + my->sync_name_list(list_type::contract_blacklist_type, true); + my->sync_name_list(list_type::resource_greylist_type, true); + + my->set_name_list(static_cast(list), static_cast(action), name_list); +} +// *bos end* + + } } /// eosio::chain diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index c56ed0add05..317453f19b8 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -142,7 +142,6 @@ namespace eosio { namespace chain { block_state_ptr fork_database::add( signed_block_ptr b, bool trust ) { EOS_ASSERT( b, fork_database_exception, "attempt to add null block" ); EOS_ASSERT( my->head, fork_db_block_not_found, "no head block set" ); - const auto& by_id_idx = my->index.get(); auto existing = by_id_idx.find( b->id() ); EOS_ASSERT( existing == by_id_idx.end(), fork_database_exception, "we already know about this block" ); @@ -203,7 +202,6 @@ namespace eosio { namespace chain { /// remove all of the invalid forks built of this id including this id void fork_database::remove( const block_id_type& id ) { vector remove_queue{id}; - for( uint32_t i = 0; i < remove_queue.size(); ++i ) { auto itr = my->index.find( 
remove_queue[i] ); if( itr != my->index.end() ) diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index a253d950358..2b68d015442 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -594,6 +594,7 @@ class apply_context { bool privileged = false; bool context_free = false; bool used_context_free_api = false; + uint64_t global_action_sequence = 0; generic_index idx64; generic_index idx128; diff --git a/libraries/chain/include/eosio/chain/block.hpp b/libraries/chain/include/eosio/chain/block.hpp index 0e85b167df8..28c49f9772e 100644 --- a/libraries/chain/include/eosio/chain/block.hpp +++ b/libraries/chain/include/eosio/chain/block.hpp @@ -51,6 +51,9 @@ namespace eosio { namespace chain { } }; + enum class block_extension_type : uint16_t { + bpsig_action_time_seed + }; /** */ diff --git a/libraries/chain/include/eosio/chain/block_header.hpp b/libraries/chain/include/eosio/chain/block_header.hpp index bf9cf0bedb8..723824b5310 100644 --- a/libraries/chain/include/eosio/chain/block_header.hpp +++ b/libraries/chain/include/eosio/chain/block_header.hpp @@ -4,6 +4,12 @@ namespace eosio { namespace chain { + /* Extended spatial data category + */ + enum block_header_extensions_type : uint16_t { + block_extensions_mroot = 0 // mroot of block extensions + }; + struct block_header { block_timestamp_type timestamp; @@ -32,13 +38,14 @@ namespace eosio { namespace chain { */ uint32_t schedule_version = 0; optional new_producers; - extensions_type header_extensions; + extensions_type header_extensions; // [0] : mroot of block extensions digest_type digest()const; block_id_type id() const; uint32_t block_num() const { return num_from_id(previous) + 1; } static uint32_t num_from_id(const block_id_type& id); + void set_block_extensions_mroot(digest_type& mroot); }; diff --git a/libraries/chain/include/eosio/chain/chain_config.hpp b/libraries/chain/include/eosio/chain/chain_config.hpp index 7f62ff111f9..907846d1a55 100644 --- a/libraries/chain/include/eosio/chain/chain_config.hpp +++ b/libraries/chain/include/eosio/chain/chain_config.hpp @@ -65,6 +65,24 @@ struct chain_config { bool operator==(const chain_config& a, const chain_config& b); inline bool operator!=(const chain_config& a, const chain_config& b) { return !(a == b); } +// *bos* +struct chain_config2 { + chain_config2( chainbase::allocator alloc ) + :actor_blacklist(alloc),contract_blacklist(alloc),resource_greylist(alloc){} + + shared_vector actor_blacklist; + shared_vector contract_blacklist; + shared_vector resource_greylist; + + void validate()const; +}; + +// *bos* +struct guaranteed_minimum_resources { + uint64_t ram_byte; + uint64_t cpu_us; + uint64_t net_byte; +}; } } // namespace eosio::chain FC_REFLECT(eosio::chain::chain_config, @@ -79,3 +97,6 @@ FC_REFLECT(eosio::chain::chain_config, (max_inline_action_size)(max_inline_action_depth)(max_authority_depth) ) +// *bos* +FC_REFLECT( eosio::chain::chain_config2, (actor_blacklist)(contract_blacklist)(resource_greylist) ) +FC_REFLECT( eosio::chain::guaranteed_minimum_resources, (ram_byte)(cpu_us)(net_byte) ) diff --git a/libraries/chain/include/eosio/chain/config.hpp b/libraries/chain/include/eosio/chain/config.hpp index c0e9806319e..6aea7c3e3bd 100644 --- a/libraries/chain/include/eosio/chain/config.hpp +++ b/libraries/chain/include/eosio/chain/config.hpp @@ -5,6 +5,7 @@ #pragma once #include #include +#include "config_xos.hpp" #pragma GCC 
diagnostic ignored "-Wunused-variable" diff --git a/libraries/chain/include/eosio/chain/config_xos.hpp b/libraries/chain/include/eosio/chain/config_xos.hpp new file mode 100644 index 00000000000..02adf0d54d0 --- /dev/null +++ b/libraries/chain/include/eosio/chain/config_xos.hpp @@ -0,0 +1,23 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ +#pragma once +#include +#include + +#pragma GCC diagnostic ignored "-Wunused-variable" + +namespace eosio { namespace chain { namespace config { + +//guaranteed minimum resources which is abbreviated gmr +const static uint32_t default_gmr_cpu_limit = 200'000; /// free cpu usage in microseconds +const static uint32_t default_gmr_net_limit = 10 * 1024; // 10 KB +const static uint32_t default_gmr_ram_limit = 0; // 0 KB +const static uint16_t default_gmr_resource_limit_per_day = 1000; + + + +} } } // namespace eosio::chain::config + + diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index ec7b53fafc0..17ca27b3235 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -27,6 +27,7 @@ namespace eosio { namespace chain { class dynamic_global_property_object; class global_property_object; + class global_property2_object; // *bos* class permission_object; class account_object; using resource_limits::resource_limits_manager; @@ -45,6 +46,21 @@ namespace eosio { namespace chain { FULL, LIGHT }; + // *bos begin* + enum class list_type:int64_t { + actor_blacklist_type=1, + contract_blacklist_type, + resource_greylist_type, + list_type_count + }; + enum class list_action_type:int64_t + { + insert_type = 1, + remove_type, + list_action_type_count + }; + + // *bos end* class controller { public: @@ -95,7 +111,7 @@ namespace eosio { namespace chain { * Starts a new pending block session upon which new transactions can * be pushed. 
*/ - void start_block( block_timestamp_type time = block_timestamp_type(), uint16_t confirm_block_count = 0 ); + void start_block( block_timestamp_type time = block_timestamp_type(), uint16_t confirm_block_count = 0, std::function signer = nullptr ); void abort_block(); @@ -190,6 +206,7 @@ namespace eosio { namespace chain { optional pending_producer_block_id()const; const producer_schedule_type& active_producers()const; + std::function pending_producer_signer()const; const producer_schedule_type& pending_producers()const; optional proposed_producers()const; @@ -216,6 +233,15 @@ namespace eosio { namespace chain { void add_resource_greylist(const account_name &name); void remove_resource_greylist(const account_name &name); + + // *bos begin* + const global_property2_object& get_global_properties2()const; // *bos* + void set_name_list(int64_t list, int64_t action, std::vector name_list); + + // void list_add_name(const int list, const account_name &name); + // void list_remove_name(const int list, const account_name &name); + // *bos end* + bool is_resource_greylisted(const account_name &name) const; const flat_set &get_resource_greylist() const; diff --git a/libraries/chain/include/eosio/chain/global_property_object.hpp b/libraries/chain/include/eosio/chain/global_property_object.hpp index fe9ae85db10..6f4f35ffd80 100644 --- a/libraries/chain/include/eosio/chain/global_property_object.hpp +++ b/libraries/chain/include/eosio/chain/global_property_object.hpp @@ -34,6 +34,15 @@ namespace eosio { namespace chain { chain_config configuration; }; + // *bos* + class global_property2_object : public chainbase::object + { + OBJECT_CTOR(global_property2_object, (cfg)) + + id_type id; + chain_config2 cfg; + guaranteed_minimum_resources gmr;//guaranteed_minimum_resources + }; /** @@ -71,11 +80,22 @@ namespace eosio { namespace chain { > >; + // *bos* + using global_property2_multi_index = chainbase::shared_multi_index_container< + global_property2_object, + indexed_by< + ordered_unique, + BOOST_MULTI_INDEX_MEMBER(global_property2_object, global_property2_object::id_type, id) + > + > + >; }} CHAINBASE_SET_INDEX_TYPE(eosio::chain::global_property_object, eosio::chain::global_property_multi_index) CHAINBASE_SET_INDEX_TYPE(eosio::chain::dynamic_global_property_object, eosio::chain::dynamic_global_property_multi_index) +// *bos* +CHAINBASE_SET_INDEX_TYPE(eosio::chain::global_property2_object, eosio::chain::global_property2_multi_index) FC_REFLECT(eosio::chain::dynamic_global_property_object, (global_action_sequence) @@ -84,3 +104,7 @@ FC_REFLECT(eosio::chain::dynamic_global_property_object, FC_REFLECT(eosio::chain::global_property_object, (proposed_schedule_block_num)(proposed_schedule)(configuration) ) +// *bos* +FC_REFLECT(eosio::chain::global_property2_object, + (cfg)(gmr) + ) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/resource_limits.hpp b/libraries/chain/include/eosio/chain/resource_limits.hpp index 4b0c58beeb0..616deb3f2a0 100644 --- a/libraries/chain/include/eosio/chain/resource_limits.hpp +++ b/libraries/chain/include/eosio/chain/resource_limits.hpp @@ -35,6 +35,16 @@ namespace eosio { namespace chain { namespace resource_limits { int64_t max = 0; ///< max per window under current congestion }; + + struct gmr_parameters { + uint64_t ram_byte; + uint64_t cpu_us; + uint64_t net_byte; + + void validate()const; // throws if the parameters do not satisfy basic sanity checks + }; + + class resource_limits_manager { public: explicit 
resource_limits_manager(chainbase::database& db) @@ -50,6 +60,14 @@ namespace eosio { namespace chain { namespace resource_limits { void initialize_account( const account_name& account ); void set_block_parameters( const elastic_limit_parameters& cpu_limit_parameters, const elastic_limit_parameters& net_limit_parameters ); + + /** + * @brief Set the guaranteed minimum resources parameters object + * + * @param res_parameters guaranteed minimum resources parameters object include ram net cpu + */ + void set_gmr_parameters( const gmr_parameters& res_parameters ); // *bos* //guaranteed minimum resources which is abbreviated gmr + void update_account_usage( const flat_set& accounts, uint32_t ordinal ); void add_transaction_usage( const flat_set& accounts, uint64_t cpu_usage, uint64_t net_usage, uint32_t ordinal ); @@ -58,7 +76,7 @@ namespace eosio { namespace chain { namespace resource_limits { /// set_account_limits returns true if new ram_bytes limit is more restrictive than the previously set one bool set_account_limits( const account_name& account, int64_t ram_bytes, int64_t net_weight, int64_t cpu_weight); - void get_account_limits( const account_name& account, int64_t& ram_bytes, int64_t& net_weight, int64_t& cpu_weight) const; + void get_account_limits( const account_name& account, int64_t& ram_bytes, int64_t& net_weight, int64_t& cpu_weight, bool raw = false) const; // *bos* add raw void process_account_limit_updates(); void process_block_usage( uint32_t block_num ); @@ -86,3 +104,5 @@ namespace eosio { namespace chain { namespace resource_limits { FC_REFLECT( eosio::chain::resource_limits::account_resource_limit, (used)(available)(max) ) FC_REFLECT( eosio::chain::resource_limits::ratio, (numerator)(denominator)) FC_REFLECT( eosio::chain::resource_limits::elastic_limit_parameters, (target)(max)(periods)(max_multiplier)(contract_rate)(expand_rate)) + +FC_REFLECT( eosio::chain::resource_limits::gmr_parameters, (ram_byte)(cpu_us)(net_byte)) diff --git a/libraries/chain/include/eosio/chain/resource_limits_private.hpp b/libraries/chain/include/eosio/chain/resource_limits_private.hpp index 687a56a4d90..877c77e5acb 100644 --- a/libraries/chain/include/eosio/chain/resource_limits_private.hpp +++ b/libraries/chain/include/eosio/chain/resource_limits_private.hpp @@ -203,6 +203,24 @@ namespace eosio { namespace chain { namespace resource_limits { > >; + class gmr_config_object : public chainbase::object { + OBJECT_CTOR(gmr_config_object); + id_type id; + + + gmr_parameters res_parameters = { config::default_gmr_ram_limit,config::default_gmr_cpu_limit, config::default_gmr_net_limit}; + + }; + + using gmr_config_index = chainbase::shared_multi_index_container< + gmr_config_object, + indexed_by< + ordered_unique, member> + > + >; + + + class resource_limits_state_object : public chainbase::object { OBJECT_CTOR(resource_limits_state_object); id_type id; @@ -265,9 +283,14 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::resource_limits::resource_usage_object, CHAINBASE_SET_INDEX_TYPE(eosio::chain::resource_limits::resource_limits_config_object, eosio::chain::resource_limits::resource_limits_config_index) CHAINBASE_SET_INDEX_TYPE(eosio::chain::resource_limits::resource_limits_state_object, eosio::chain::resource_limits::resource_limits_state_index) +CHAINBASE_SET_INDEX_TYPE(eosio::chain::resource_limits::gmr_config_object, eosio::chain::resource_limits::gmr_config_index) + FC_REFLECT(eosio::chain::resource_limits::usage_accumulator, (last_ordinal)(value_ex)(consumed)) 
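Because these `gmr_parameters` feed straight into the windowed limits, a worked example may help. The sketch below is standalone and illustrative: the stake weights and capacity are made up, and the only number taken from the patch is the 200'000 us `default_gmr_cpu_limit` from config_xos.hpp. It mirrors the `+ gmr.cpu_us` adjustment that resource_limits.cpp applies further below.

```cpp
#include <cstdint>
#include <cstdio>

// Worked example of the GMR adjustment: an account's per-window budget is its
// weighted share of the virtual capacity PLUS the guaranteed minimum, so even
// a zero-stake account can still consume gmr.cpu_us microseconds per window.
int main() {
    const std::uint64_t virtual_capacity_in_window = 200'000'000; // made-up, in microseconds
    const std::uint64_t user_weight     = 0;                      // account with no CPU staked
    const std::uint64_t all_user_weight = 1'000'000;              // made-up total stake weight
    const std::uint64_t gmr_cpu_us      = 200'000;                // default_gmr_cpu_limit (config_xos.hpp)

    std::uint64_t share  = virtual_capacity_in_window * user_weight / all_user_weight; // 0
    std::uint64_t budget = share + gmr_cpu_us;                                         // 200000

    std::printf("weighted share: %llu us, budget with GMR: %llu us\n",
                (unsigned long long)share, (unsigned long long)budget);
}
```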
FC_REFLECT(eosio::chain::resource_limits::resource_limits_object, (owner)(net_weight)(cpu_weight)(ram_bytes)) FC_REFLECT(eosio::chain::resource_limits::resource_usage_object, (owner)(net_usage)(cpu_usage)(ram_usage)) FC_REFLECT(eosio::chain::resource_limits::resource_limits_config_object, (cpu_limit_parameters)(net_limit_parameters)(account_cpu_usage_average_window)(account_net_usage_average_window)) -FC_REFLECT(eosio::chain::resource_limits::resource_limits_state_object, (average_block_net_usage)(average_block_cpu_usage)(pending_net_usage)(pending_cpu_usage)(total_net_weight)(total_cpu_weight)(total_ram_bytes)(virtual_net_limit)(virtual_cpu_limit)) \ No newline at end of file +FC_REFLECT(eosio::chain::resource_limits::resource_limits_state_object, (average_block_net_usage)(average_block_cpu_usage)(pending_net_usage)(pending_cpu_usage)(total_net_weight)(total_cpu_weight)(total_ram_bytes)(virtual_net_limit)(virtual_cpu_limit)) + + +FC_REFLECT(eosio::chain::resource_limits::gmr_config_object, (res_parameters)) diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp index 4610f24c891..028b050a595 100644 --- a/libraries/chain/include/eosio/chain/types.hpp +++ b/libraries/chain/include/eosio/chain/types.hpp @@ -148,6 +148,7 @@ namespace eosio { namespace chain { index_double_object_type, index_long_double_object_type, global_property_object_type, + global_property2_object_type, dynamic_global_property_object_type, block_summary_object_type, transaction_object_type, @@ -169,6 +170,7 @@ namespace eosio { namespace chain { resource_usage_object_type, resource_limits_state_object_type, resource_limits_config_object_type, + gmr_config_object_type, ///< Defined by bos account_history_object_type, ///< Defined by history_plugin action_history_object_type, ///< Defined by history_plugin reversible_block_object_type, diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp index fa38f76a1e2..21865ac8676 100644 --- a/libraries/chain/resource_limits.cpp +++ b/libraries/chain/resource_limits.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -13,7 +14,8 @@ using resource_index_set = index_set< resource_limits_index, resource_usage_index, resource_limits_state_index, - resource_limits_config_index + resource_limits_config_index, + gmr_config_index >; static_assert( config::rate_limiting_precision > 0, "config::rate_limiting_precision must be positive" ); @@ -36,6 +38,13 @@ void elastic_limit_parameters::validate()const { EOS_ASSERT( expand_rate.denominator > 0, resource_limit_exception, "elastic limit parameter 'expand_rate' is not a well-defined ratio" ); } +void gmr_parameters::validate()const { + + EOS_ASSERT( cpu_us > 0, resource_limit_exception, "guaranteed minmum resources parameter 'cpu_us' cannot be zero" ); + EOS_ASSERT( net_byte > 0, resource_limit_exception, "guaranteed minmum resources parameter 'net_byte' cannot be zero" ); + EOS_ASSERT( ram_byte >= 0, resource_limit_exception, "guaranteed minmum resources parameter'ram_byte' cannot be less than zero" ); +} + void resource_limits_state_object::update_virtual_cpu_limit( const resource_limits_config_object& cfg ) { //idump((average_block_cpu_usage.average())); @@ -56,6 +65,10 @@ void resource_limits_manager::initialize_database() { // see default settings in the declaration }); + const auto& gmr_config = _db.create([](gmr_config_object& config){ + // see default settings in the declaration + }); + 
_db.create([&config](resource_limits_state_object& state){ // see default settings in the declaration @@ -108,6 +121,15 @@ void resource_limits_manager::set_block_parameters(const elastic_limit_parameter }); } +//guaranteed minimum resources which is abbreviated gmr +void resource_limits_manager::set_gmr_parameters(const gmr_parameters& res_parameters) { + res_parameters.validate(); + const auto& config = _db.get(); + _db.modify(config, [&](gmr_config_object& c){ + c.res_parameters = res_parameters; + }); +} + void resource_limits_manager::update_account_usage(const flat_set& accounts, uint32_t time_slot ) { const auto& config = _db.get(); for( const auto& a : accounts ) { @@ -123,6 +145,10 @@ void resource_limits_manager::add_transaction_usage(const flat_set const auto& state = _db.get(); const auto& config = _db.get(); + + //guaranteed minimum resources which is abbreviated gmr + const auto& gmr = _db.get().res_parameters; // *bos* + for( const auto& a : accounts ) { const auto& usage = _db.get( a ); @@ -144,7 +170,7 @@ void resource_limits_manager::add_transaction_usage(const flat_set uint128_t user_weight = (uint128_t)cpu_weight; uint128_t all_user_weight = state.total_cpu_weight; - auto max_user_use_in_window = (virtual_network_capacity_in_window * user_weight) / all_user_weight; + auto max_user_use_in_window = (virtual_network_capacity_in_window * user_weight) / all_user_weight+gmr.cpu_us; EOS_ASSERT( cpu_used_in_window <= max_user_use_in_window, tx_cpu_usage_exceeded, @@ -163,7 +189,7 @@ void resource_limits_manager::add_transaction_usage(const flat_set uint128_t user_weight = (uint128_t)net_weight; uint128_t all_user_weight = state.total_net_weight; - auto max_user_use_in_window = (virtual_network_capacity_in_window * user_weight) / all_user_weight; + auto max_user_use_in_window = (virtual_network_capacity_in_window * user_weight) / all_user_weight+gmr.net_byte; EOS_ASSERT( net_used_in_window <= max_user_use_in_window, tx_net_usage_exceeded, @@ -269,8 +295,9 @@ bool resource_limits_manager::set_account_limits( const account_name& account, i return decreased_limit; } -void resource_limits_manager::get_account_limits( const account_name& account, int64_t& ram_bytes, int64_t& net_weight, int64_t& cpu_weight ) const { +void resource_limits_manager::get_account_limits( const account_name& account, int64_t& ram_bytes, int64_t& net_weight, int64_t& cpu_weight, bool raw ) const { const auto* pending_buo = _db.find( boost::make_tuple(true, account) ); + const auto& gmr = _db.get().res_parameters; // *bos* if (pending_buo) { ram_bytes = pending_buo->ram_bytes; net_weight = pending_buo->net_weight; @@ -281,6 +308,13 @@ void resource_limits_manager::get_account_limits( const account_name& account, i net_weight = buo.net_weight; cpu_weight = buo.cpu_weight; } + + // *bos* + const int64_t ONEKM = 1024; + if (!raw && ram_bytes >= ONEKM) + { + ram_bytes += gmr.ram_byte; + } } @@ -373,6 +407,7 @@ account_resource_limit resource_limits_manager::get_account_cpu_limit_ex( const const auto& state = _db.get(); const auto& usage = _db.get(name); const auto& config = _db.get(); + const auto& gmr = _db.get().res_parameters; // *bos* int64_t cpu_weight, x, y; get_account_limits( name, x, y, cpu_weight ); @@ -389,7 +424,7 @@ account_resource_limit resource_limits_manager::get_account_cpu_limit_ex( const uint128_t user_weight = (uint128_t)cpu_weight; uint128_t all_user_weight = (uint128_t)state.total_cpu_weight; - auto max_user_use_in_window = (virtual_cpu_capacity_in_window * user_weight) / all_user_weight; 
+         auto max_user_use_in_window = (virtual_cpu_capacity_in_window * user_weight) / all_user_weight + gmr.cpu_us;
          auto cpu_used_in_window = impl::integer_divide_ceil((uint128_t)usage.cpu_usage.value_ex * window_size, (uint128_t)config::rate_limiting_precision);

          if( max_user_use_in_window <= cpu_used_in_window )
@@ -411,6 +446,7 @@
account_resource_limit resource_limits_manager::get_account_net_limit_ex( const
   const auto& config = _db.get<resource_limits_config_object>();
   const auto& state  = _db.get<resource_limits_state_object>();
   const auto& usage  = _db.get<resource_usage_object>(name);
+   const auto& gmr = _db.get<gmr_config_object>().res_parameters; // *bos*

   int64_t net_weight, x, y;
   get_account_limits( name, x, net_weight, y );
@@ -428,7 +464,7 @@
      uint128_t all_user_weight = (uint128_t)state.total_net_weight;

-      auto max_user_use_in_window = (virtual_network_capacity_in_window * user_weight) / all_user_weight;
+      auto max_user_use_in_window = (virtual_network_capacity_in_window * user_weight) / all_user_weight + gmr.net_byte;
      auto net_used_in_window = impl::integer_divide_ceil((uint128_t)usage.net_usage.value_ex * window_size, (uint128_t)config::rate_limiting_precision);

      if( max_user_use_in_window <= net_used_in_window )
diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp
index f03fb71f445..f63d836d377 100644
--- a/libraries/chain/wasm_interface.cpp
+++ b/libraries/chain/wasm_interface.cpp
@@ -148,7 +148,7 @@ class privileged_api : public context_aware_api {
      }

      void get_resource_limits( account_name account, int64_t& ram_bytes, int64_t& net_weight, int64_t& cpu_weight ) {
-         context.control.get_resource_limits_manager().get_account_limits( account, ram_bytes, net_weight, cpu_weight);
+         context.control.get_resource_limits_manager().get_account_limits( account, ram_bytes, net_weight, cpu_weight, true); // *bos* raw=true
      }

      int64_t set_proposed_producers( array_ptr<char> packed_producer_schedule, size_t datalen) {
@@ -192,6 +192,42 @@ class privileged_api : public context_aware_api {
         });
      }

+      // *bos begin*
+      void set_name_list_packed(int64_t list, int64_t action, array_ptr<char> packed_name_list, size_t datalen)
+      {
+         int64_t lstbegin = static_cast<int64_t>(list_type::actor_blacklist_type);
+         int64_t lstend   = static_cast<int64_t>(list_type::list_type_count);
+         int64_t actbegin = static_cast<int64_t>(list_action_type::insert_type);
+         int64_t actend   = static_cast<int64_t>(list_action_type::list_action_type_count);
+         EOS_ASSERT(list >= lstbegin && list < lstend, wasm_execution_error, "unknown name list!");
+         EOS_ASSERT(action >= actbegin && action < actend, wasm_execution_error, "unknown action");
+
+         datastream<const char*> ds(packed_name_list, datalen);
+         std::vector<account_name> name_list; // TODO: std::set does not work here yet (bug)
+         fc::raw::unpack(ds, name_list);
+
+         context.control.set_name_list(list, action, name_list);
+      }
+
+      void set_guaranteed_minimum_resources(int64_t ram_byte, int64_t cpu_us, int64_t net_byte)
+      {
+         EOS_ASSERT(ram_byte >= 0 && ram_byte <= 100 * 1024, wasm_execution_error, "resources minimum guarantee for ram out of range, expected [0, 102400]");
+         EOS_ASSERT(cpu_us >= 0 && cpu_us <= 100 * 1000, wasm_execution_error, "resources minimum guarantee for cpu out of range, expected [0, 100000]");
+         EOS_ASSERT(net_byte >= 0 && net_byte <= 100 * 1024, wasm_execution_error, "resources minimum guarantee for net out of range, expected [0, 102400]");
+
+         // guaranteed minimum resources, abbreviated "gmr"
+         context.db.modify(context.control.get_global_properties2(),
+            [&](auto &gprops2) {
+               gprops2.gmr.ram_byte = ram_byte;
+               gprops2.gmr.cpu_us   = cpu_us;
+               gprops2.gmr.net_byte = net_byte;
+            });
+      }
+      // *bos end*
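From a contract's point of view, the two privileged intrinsics above would be declared and called roughly as follows. This is a hedged sketch: the `extern "C"` prototypes mirror the intrinsic signatures registered in this file, but the wrapper functions, their names, and the packing convention are assumptions for illustration, not part of this patch.

```cpp
// Hypothetical system-contract wrapper over the two privileged intrinsics.
// The prototypes mirror the host-side signatures; everything else (names,
// authorization, call sites) is an assumption for illustration.
#include <eosiolib/eosio.hpp>
#include <eosiolib/datastream.hpp>
#include <vector>

extern "C" {
   void set_name_list_packed(int64_t list, int64_t action, const char* data, uint32_t datalen);
   void set_guaranteed_minimum_resources(int64_t ram_byte, int64_t cpu_us, int64_t net_byte);
}

// Insert accounts into the on-chain actor blacklist (list 1, action 1 = insert).
void blacklist_actors( const std::vector<account_name>& names ) {
   auto packed = eosio::pack(names); // fc::raw-compatible packing of the name vector
   set_name_list_packed(1, 1, packed.data(), packed.size());
}

// Give every account a small free allowance: 0 RAM, 200 ms CPU, 10 KiB NET.
void set_default_gmr() {
   set_guaranteed_minimum_resources(0, 200'000, 10 * 1024);
}
```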
      bool is_privileged( account_name n )const {
         return context.db.get<account_object, by_name>( n ).privileged;
      }
@@ -1362,6 +1398,14 @@ class context_free_transaction_api : public context_aware_api {
         return context.get_packed_transaction().size();
      }

+      void get_transaction_id( fc::sha256& id ) {
+         id = context.trx_context.id;
+      }
+
+      void get_action_sequence(uint64_t& seq) {
+         seq = context.global_action_sequence;
+      }
+
      int expiration() {
        return context.trx_context.trx.expiration.sec_since_epoch();
      }
@@ -1644,6 +1688,81 @@ class call_depth_api : public context_aware_api {
   }
};

+class action_seed_api : public context_aware_api {
+public:
+   action_seed_api(apply_context& ctx)
+   : context_aware_api(ctx) {}
+
+   int bpsig_action_time_seed(array_ptr<char> sig, size_t siglen) {
+      auto data = action_timestamp();
+      fc::sha256::encoder encoder;
+      encoder.write(reinterpret_cast<char*>(data.data()), data.size() * sizeof(uint32_t));
+      auto digest = encoder.result();
+      optional<signature_type> signature;
+      auto block_state = context.control.pending_block_state();
+      for (auto& extension : block_state->block->block_extensions) {
+         if (extension.first != static_cast<uint16_t>(block_extension_type::bpsig_action_time_seed)) continue;
+         EOS_ASSERT(extension.second.size() > 8, transaction_exception, "invalid producer signature in block extensions");
+         uint64_t* act_parts = reinterpret_cast<uint64_t*>(extension.second.data());
+         if ( act_parts[0] != context.global_action_sequence) continue;
+
+         auto sig_data = extension.second.data() + 8;
+         auto sig_size = extension.second.size() - 8;
+         signature.emplace();
+         datastream<char*> ds(sig_data, sig_size);
+         fc::raw::unpack(ds, *signature);
+         auto check = fc::crypto::public_key(*signature, digest, false);
+         EOS_ASSERT( check == block_state->block_signing_key, transaction_exception, "recovered key differs from the expected block signing key" );
+         break;
+      }
+      bool sign = false;
+      if (context.control.is_producing_block() && !signature) {
+         auto signer = context.control.pending_producer_signer();
+         if (signer) {
+            // the producer is producing this block, so sign the seed now
+            signature = signer(digest);
+            sign = true;
+         } else {
+            // a non-producing node is speculatively executing this block, so skip the signing
+            // TODO: the speculative result will differ from the produced result
+            signature.emplace();
+         }
+      }
+      EOS_ASSERT(!!signature, transaction_exception, "empty sig action seed");
+      auto& s = *signature;
+      auto sig_size = fc::raw::pack_size(s);
+      if (siglen == 0) return sig_size;
+      if (sig_size <= siglen) {
+         datastream<char*> ds(sig, sig_size);
+         fc::raw::pack(ds, s);
+         if (sign) {
+            block_state->block->block_extensions.emplace_back();
+            char* act_parts = reinterpret_cast<char*>(&context.global_action_sequence);
+            auto &extension = 
block_state->block->block_extensions.back(); + extension.first = static_cast(block_extension_type::bpsig_action_time_seed); + extension.second.resize(8 + sig_size); + std::copy(act_parts, act_parts + 8, extension.second.data()); + std::copy((char*)sig, (char*)sig + sig_size, extension.second.data() + 8); + } + return sig_size; + } + return 0; + } +private: + vector action_timestamp() { + auto current = context.control.pending_block_time().time_since_epoch().count(); + current -= current % (config::block_interval_us); + + uint32_t* current_halves = reinterpret_cast(¤t); + uint32_t* act_parts = reinterpret_cast(&context.global_action_sequence); + return vector{act_parts[0],act_parts[1], current_halves[0], current_halves[1]}; + } +}; +REGISTER_INTRINSICS(action_seed_api, +(bpsig_action_time_seed, int(int, int) ) +); + REGISTER_INJECTED_INTRINSICS(call_depth_api, (call_depth_assert, void() ) ); @@ -1702,6 +1821,8 @@ REGISTER_INTRINSICS(privileged_api, (set_proposed_producers, int64_t(int,int) ) (get_blockchain_parameters_packed, int(int, int) ) (set_blockchain_parameters_packed, void(int,int) ) + (set_name_list_packed, void(int64_t,int64_t,int,int) ) + (set_guaranteed_minimum_resources, void(int64_t,int64_t,int64_t) ) (is_privileged, int(int64_t) ) (set_privileged, void(int64_t, int) ) ); @@ -1823,6 +1944,8 @@ REGISTER_INTRINSICS(console_api, REGISTER_INTRINSICS(context_free_transaction_api, (read_transaction, int(int, int) ) (transaction_size, int() ) + (get_transaction_id, void(int) ) + (get_action_sequence, void(int) ) (expiration, int() ) (tapos_block_prefix, int() ) (tapos_block_num, int() ) diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 9a4f4094330..22e52407953 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -140,7 +140,7 @@ namespace eosio { namespace testing { return traces; } - void push_genesis_block(); + void push_genesis_block(); vector get_producer_keys( const vector& producer_names )const; transaction_trace_ptr set_producers(const vector& producer_names); diff --git a/plugins/CMakeLists.txt b/plugins/CMakeLists.txt index 9b0b17b9d0a..9d4a6ab93b3 100644 --- a/plugins/CMakeLists.txt +++ b/plugins/CMakeLists.txt @@ -19,6 +19,8 @@ add_subdirectory(mongo_db_plugin) add_subdirectory(login_plugin) add_subdirectory(test_control_plugin) add_subdirectory(test_control_api_plugin) +add_subdirectory(kafka_plugin) +add_subdirectory(notify_plugin) # Forward variables to top level so packaging picks them up set(CPACK_DEBIAN_PACKAGE_DEPENDS ${CPACK_DEBIAN_PACKAGE_DEPENDS} PARENT_SCOPE) diff --git a/plugins/history_api_plugin/history_api_plugin.cpp b/plugins/history_api_plugin/history_api_plugin.cpp index bd78dede086..286e97f6e54 100644 --- a/plugins/history_api_plugin/history_api_plugin.cpp +++ b/plugins/history_api_plugin/history_api_plugin.cpp @@ -32,17 +32,15 @@ void history_api_plugin::plugin_initialize(const variables_map&) {} }} #define CHAIN_RO_CALL(call_name) CALL(history, ro_api, history_apis::read_only, call_name) -//#define CHAIN_RW_CALL(call_name) CALL(history, rw_api, history_apis::read_write, call_name) void history_api_plugin::plugin_startup() { ilog( "starting history_api_plugin" ); auto ro_api = app().get_plugin().get_read_only_api(); - //auto rw_api = app().get_plugin().get_read_write_api(); app().get_plugin().add_api({ -// CHAIN_RO_CALL(get_transaction), CHAIN_RO_CALL(get_actions), CHAIN_RO_CALL(get_transaction), + 
+      CHAIN_RO_CALL(get_block_detail),
      CHAIN_RO_CALL(get_key_accounts),
      CHAIN_RO_CALL(get_controlled_accounts)
   });
}
diff --git a/plugins/history_plugin/history_plugin.cpp b/plugins/history_plugin/history_plugin.cpp
index 8245215b061..f3838726726 100644
--- a/plugins/history_plugin/history_plugin.cpp
+++ b/plugins/history_plugin/history_plugin.cpp
@@ -372,8 +372,9 @@ namespace eosio {
   namespace history_apis {

+
   read_only::get_actions_result read_only::get_actions( const read_only::get_actions_params& params )const {
-      edump((params));
+      edump((params));

      auto& chain = history->chain_plug->chain();
      const auto& db = chain.db();
      const auto abi_serializer_max_time = history->chain_plug->get_abi_serializer_max_time();
@@ -564,6 +565,94 @@ namespace eosio {
      return result;
   }

+   fc::variant read_only::get_block_detail(const read_only::get_block_detail_params& params) const {
+      static char const TRANSACTIONS[] = "transactions";
+      static char const TRX[] = "trx";
+      static char const ID[] = "id";
+      static char const TRACES[] = "traces";
+
+      auto & plugin = history->chain_plug;
+      auto & chain = plugin->chain();
+
+      auto get_object_value = [](fc::variant const& src, char const * key) -> fc::variant const & {
+         static auto const null_variant = fc::variant();
+
+         if ( !src.is_object() )
+            return null_variant;
+
+         auto & obj = src.get_object();
+         auto const & itr = obj.find(key);
+         if ( itr == obj.end() )
+            return null_variant;
+
+         return itr->value();
+      };
+
+      auto get_tx_array = [&get_object_value](fc::variant const& block) -> fc::variants const & {
+         static auto const null_variants = fc::variants();
+
+         auto & value = get_object_value(block, TRANSACTIONS);
+         if ( !value.is_array() )
+            return null_variants;
+
+         return value.get_array();
+      };
+
+      auto get_tx_id = [&get_object_value](fc::variant const& tx) -> fc::optional<transaction_id_type> {
+         auto & id = get_object_value(get_object_value(tx, TRX), ID);
+         if ( !id.is_string() )
+            return fc::optional<transaction_id_type>();
+
+         return fc::optional<transaction_id_type>(id.get_string());
+      };
+
+      auto const & src = plugin->get_read_only_api().get_block(
+         chain_apis::read_only::get_block_params {
+            /*block_num_or_id = */ params.block_num_or_id
+         }
+      );
+
+      auto & rhs = get_tx_array(src);
+      if ( rhs.empty() )
+         return src;
+
+      auto lhs = fc::variants();
+      lhs.reserve(rhs.size());
+
+      auto & database = chain.db();
+      auto & index = database.get_index<action_history_index, by_trx_id>();
+      auto const abi_serializer_max_time = plugin->get_abi_serializer_max_time();
+      for ( auto const & tx : rhs ) {
+         auto maybe_id = get_tx_id(tx);
+         if ( maybe_id ) {
+            auto id = *maybe_id;
+            auto itr = index.lower_bound(boost::make_tuple(id));
+            auto traces = fc::variants();
+
+            while ( itr != index.end() && itr->trx_id == id ) {
+               fc::datastream<const char*> ds( itr->packed_action_trace.data(), itr->packed_action_trace.size() );
+               action_trace t;
+               fc::raw::unpack( ds, t );
+               traces.emplace_back( chain.to_variant_with_abi(t, abi_serializer_max_time) );
+
+               ++itr;
+            }
+
+            if ( !traces.empty() ) {
+               auto new_trx = fc::mutable_variant_object(tx[TRX])(TRACES, traces);
+               auto new_tx = fc::mutable_variant_object(tx).set(TRX, move(new_trx));
+               lhs.emplace_back(move(new_tx));
+               continue;
+            }
+         }
+
+         lhs.emplace_back(tx);
+      }
+
+      return fc::mutable_variant_object(src).set(TRANSACTIONS, move(lhs));
+   }
+
   read_only::get_key_accounts_results read_only::get_key_accounts(const get_key_accounts_params& params) const {
      std::set<account_name> accounts;
      const auto& db = history->chain_plug->chain().db();
@@ -586,6 +675,4 @@ namespace eosio {
   }

} /// history_apis
-
-
} /// namespace eosio
diff --git a/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp b/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp
index b6801b30a29..838f9b24662 100644
--- a/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp
+++ b/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp
@@ -50,7 +50,6 @@ class read_only {
         optional<bool> time_limit_exceeded_error;
      };

-
      get_actions_result get_actions( const get_actions_params& )const;

@@ -71,8 +70,6 @@ class read_only {

      get_transaction_result get_transaction( const get_transaction_params& )const;

-
-
      /*
      struct ordered_transaction_results {
         uint32_t seq_num;
@@ -83,6 +80,12 @@ class read_only {
      get_transactions_results get_transactions(const get_transactions_params& params) const;
      */

+     struct get_block_detail_params {
+        string block_num_or_id;
+     };
+
+     fc::variant get_block_detail(const get_block_detail_params& params) const;
+

      struct get_key_accounts_params {
         chain::public_key_type public_key;
@@ -150,7 +153,10 @@ FC_REFLECT(eosio::history_apis::read_only::get_transactions_params, (account_nam
FC_REFLECT(eosio::history_apis::read_only::ordered_transaction_results, (seq_num)(transaction_id)(transaction) )
FC_REFLECT(eosio::history_apis::read_only::get_transactions_results, (transactions)(time_limit_exceeded_error) )
*/
-FC_REFLECT(eosio::history_apis::read_only::get_key_accounts_params, (public_key) )
-FC_REFLECT(eosio::history_apis::read_only::get_key_accounts_results, (account_names) )
-FC_REFLECT(eosio::history_apis::read_only::get_controlled_accounts_params, (controlling_account) )
-FC_REFLECT(eosio::history_apis::read_only::get_controlled_accounts_results, (controlled_accounts) )
+
+FC_REFLECT( eosio::history_apis::read_only::get_block_detail_params, (block_num_or_id) )
+
+FC_REFLECT( eosio::history_apis::read_only::get_key_accounts_params, (public_key) )
+FC_REFLECT( eosio::history_apis::read_only::get_key_accounts_results, (account_names) )
+FC_REFLECT( eosio::history_apis::read_only::get_controlled_accounts_params, (controlling_account) )
+FC_REFLECT( eosio::history_apis::read_only::get_controlled_accounts_results, (controlled_accounts) )
diff --git a/plugins/kafka_plugin/CMakeLists.txt b/plugins/kafka_plugin/CMakeLists.txt
new file mode 100644
index 00000000000..62f6127148f
--- /dev/null
+++ b/plugins/kafka_plugin/CMakeLists.txt
@@ -0,0 +1,10 @@
+file(GLOB HEADERS "*.hpp")
+add_library(kafka_plugin
+            kafka_plugin.cpp kafka.cpp try_handle.cpp
+            ${HEADERS})
+
+find_package(Cppkafka)
+find_package(RdKafka)
+
+target_include_directories(kafka_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ${CPPKAFKA_INCLUDE_DIR})
+target_link_libraries(kafka_plugin chain_plugin appbase ${CPPKAFKA_LIBRARY} RdKafka::rdkafka)
diff --git a/plugins/kafka_plugin/fifo.h b/plugins/kafka_plugin/fifo.h
new file mode 100644
index 00000000000..c65dbe2bf81
--- /dev/null
+++ b/plugins/kafka_plugin/fifo.h
@@ -0,0 +1,85 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace kafka {
+
+constexpr std::size_t FIFO_MAX_PUSH_SIZE = 1024;
+constexpr std::size_t FIFO_MAX_POP_SIZE = 1024;
+
+template <typename T>
+class fifo : public boost::noncopyable {
+public:
+  fifo(std::size_t max_push_size = FIFO_MAX_PUSH_SIZE, std::size_t max_pop_size = FIFO_MAX_POP_SIZE);
+  void push(const T& element);
+  std::vector<T> pop();
+  bool empty();
+  void awaken();
+
+private:
+  std::mutex mux_;
+  std::condition_variable not_empty_cv_;
+  std::condition_variable not_full_cv_;
+  bool non_blocking_{};
+  std::deque<T> deque_;
+  std::size_t max_push_size_;
+  std::size_t max_pop_size_;
+};
+
+template <typename T>
+fifo<T>::fifo(std::size_t max_push_size, std::size_t max_pop_size) {
+  max_push_size_ = max_push_size;
+  max_pop_size_ = max_pop_size;
+}
+
+template <typename T>
+void fifo<T>::push(const T& element) {
+  std::unique_lock<std::mutex> lock(mux_);
+
+  if (deque_.size() >= max_push_size_) {
+    not_full_cv_.wait(lock, [&] {
+      return non_blocking_ || deque_.size() < max_push_size_;
+    });
+  }
+
+  deque_.push_back(element);
+  not_empty_cv_.notify_one();
+}
+
+template <typename T>
+std::vector<T> fifo<T>::pop() {
+  std::unique_lock<std::mutex> lock(mux_);
+  if (deque_.empty()) {
+    not_empty_cv_.wait(lock, [&] {
+      return non_blocking_ || !deque_.empty();
+    });
+  }
+
+  std::vector<T> result;
+  for (std::size_t i = 0; i < max_pop_size_ && !deque_.empty(); ++i) {
+    result.push_back(std::move(deque_.front()));
+    deque_.pop_front();
+  }
+  not_full_cv_.notify_all();
+  return result;
+}
+
+template <typename T>
+bool fifo<T>::empty() {
+  std::unique_lock<std::mutex> lock(mux_);
+  return deque_.empty();
+}
+
+template <typename T>
+void fifo<T>::awaken() {
+  non_blocking_ = true;
+  not_empty_cv_.notify_all();
+  not_full_cv_.notify_all();
+}
+
+}
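The `fifo<T>` above is a bounded blocking queue with batched pop: `push` blocks when full, `pop` blocks when empty and drains up to `max_pop_size` items, and `awaken` flips it into non-blocking mode for shutdown. A minimal usage sketch (the driver code is ours, only the header is the plugin's):

```cpp
// Usage sketch for kafka::fifo<T>: one producer pushes, the consumer drains
// in batches; awaken() unblocks waiting threads at shutdown.
#include <iostream>
#include <thread>
#include "fifo.h"

int main() {
    kafka::fifo<int> queue(1024, 256);   // max_push_size, max_pop_size

    std::thread producer([&] {
        for (int i = 0; i < 1000; ++i) queue.push(i);  // blocks when full
        queue.awaken();                  // let pop() return even when empty
    });

    std::size_t total = 0;
    while (total < 1000) {
        auto batch = queue.pop();        // blocks until data or awaken()
        total += batch.size();
    }
    producer.join();
    std::cout << "consumed " << total << " items\n";
    return 0;
}
```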
diff --git a/plugins/kafka_plugin/kafka.cpp b/plugins/kafka_plugin/kafka.cpp
new file mode 100644
index 00000000000..86abbb6d62c
--- /dev/null
+++ b/plugins/kafka_plugin/kafka.cpp
@@ -0,0 +1,186 @@
+#include "kafka.hpp"
+
+#include
+
+#include "try_handle.hpp"
+
+namespace std {
+template<> struct hash<kafka::bytes> {
+   typedef kafka::bytes argument_type;
+   typedef size_t result_type;
+   result_type operator()(argument_type const& s) const noexcept {
+      return std::hash<string>{}(string(s.begin(), s.end()));
+   }
+};
+}
+
+namespace kafka {
+
+using chain::account_name;
+using chain::action_name;
+using chain::block_id_type;
+using chain::permission_name;
+using chain::transaction;
+using chain::signed_transaction;
+using chain::signed_block;
+using chain::transaction_id_type;
+
+namespace {
+
+inline bytes checksum_bytes(const fc::sha256& s) { return bytes(s.data(), s.data() + sizeof(fc::sha256)); }
+
+TransactionStatus transactionStatus(fc::enum_type<uint8_t, chain::transaction_receipt::status_enum> status) {
+   if (status == chain::transaction_receipt::executed) return TransactionStatus::executed;
+   else if (status == chain::transaction_receipt::soft_fail) return TransactionStatus::soft_fail;
+   else if (status == chain::transaction_receipt::hard_fail) return TransactionStatus::hard_fail;
+   else if (status == chain::transaction_receipt::delayed) return TransactionStatus::delayed;
+   else if (status == chain::transaction_receipt::expired) return TransactionStatus::expired;
+   else return TransactionStatus::unknown;
+}
+
+}
+
+void kafka::set_config(Configuration config) {
+   config_ = config;
+}
+
+void kafka::set_topics(const string& block_topic, const string& tx_topic, const string& tx_trace_topic, const string& action_topic) {
+   block_topic_ = block_topic;
+   tx_topic_ = tx_topic;
+   tx_trace_topic_ = tx_trace_topic;
+   action_topic_ = action_topic;
+}
+
+void kafka::set_partition(int partition) {
+   partition_ = partition;
+}
+
+void kafka::start() {
+   producer_ = std::make_unique<Producer>(config_);
+
+   auto conf = producer_->get_configuration().get_all();
+   ilog("Kafka config: ${conf}", ("conf", conf));
+}
+
+void kafka::stop() {
+   producer_->flush();
+   producer_.reset();
+}
+
+void kafka::push_block(const chain::block_state_ptr& block_state, bool irreversible) {
+   const auto& header = block_state->header;
+   auto b = std::make_shared<Block>();
+
+   b->id = checksum_bytes(block_state->id);
+   b->num = block_state->block_num;
+   b->timestamp = header.timestamp;
+
+   b->lib = irreversible;
+
+   b->block = fc::raw::pack(*block_state->block);
+   b->tx_count = static_cast<uint32_t>(block_state->block->transactions.size());
+
+   uint16_t seq{};
+   for (const auto& tx_receipt: block_state->block->transactions) {
+      auto count = push_transaction(tx_receipt, b, seq++);
+      b->action_count += count.first;
+      b->context_free_action_count += count.second;
+   }
+
+   consume_block(b);
+}
+
+std::pair<uint32_t, uint32_t> kafka::push_transaction(const chain::transaction_receipt& tx_receipt, const BlockPtr& block, uint16_t block_seq) {
+   auto t = std::make_shared<Transaction>();
+   if (tx_receipt.trx.contains<transaction_id_type>()) {
+      t->id = checksum_bytes(tx_receipt.trx.get<transaction_id_type>());
+   } else {
+      auto signed_tx = tx_receipt.trx.get<chain::packed_transaction>().get_signed_transaction();
+      t->id = checksum_bytes(signed_tx.id());
+      t->action_count = static_cast<uint32_t>(signed_tx.actions.size());
+      t->context_free_action_count = static_cast<uint32_t>(signed_tx.context_free_actions.size());
+   }
+   t->block_id = block->id;
+   t->block_num = block->num;
+   t->block_time = block->timestamp;
+   t->block_seq = block_seq;
+
+   consume_transaction(t);
+
+   return {t->action_count, t->context_free_action_count};
+}
+
+void kafka::push_transaction_trace(const chain::transaction_trace_ptr& tx_trace) {
+   auto t = std::make_shared<TransactionTrace>();
+
+   t->id = checksum_bytes(tx_trace->id);
+   t->block_num = tx_trace->block_num;
+   t->scheduled = tx_trace->scheduled;
+   if (tx_trace->receipt) {
+      t->status = transactionStatus(tx_trace->receipt->status);
+      t->cpu_usage_us = tx_trace->receipt->cpu_usage_us;
+      t->net_usage_words = tx_trace->receipt->net_usage_words;
+   }
+   if (tx_trace->except) {
+      t->exception = tx_trace->except->to_string();
+   }
+
+   consume_transaction_trace(t);
+
+   for (auto& action_trace: tx_trace->action_traces) {
+      push_action(action_trace, 0, t); // 0 means no parent
+   }
+}
+
+void kafka::push_action(const chain::action_trace& action_trace, uint64_t parent_seq, const TransactionTracePtr& tx) {
+   auto a = std::make_shared<Action>();
+
+   a->global_seq = action_trace.receipt.global_sequence;
+   a->recv_seq = action_trace.receipt.recv_sequence;
+   a->parent_seq = parent_seq;
+   a->account = action_trace.act.account;
+   a->name = action_trace.act.name;
+   if (not action_trace.act.authorization.empty()) a->auth = fc::raw::pack(action_trace.act.authorization);
+   a->data = action_trace.act.data;
+   a->receiver = action_trace.receipt.receiver;
+   if (not action_trace.receipt.auth_sequence.empty()) a->auth_seq = fc::raw::pack(action_trace.receipt.auth_sequence);
+   a->code_seq = action_trace.receipt.code_sequence;
+   a->abi_seq = action_trace.receipt.abi_sequence;
+   a->block_num = action_trace.block_num;
+   a->block_time = action_trace.block_time;
+   a->tx_id = checksum_bytes(action_trace.trx_id);
+   if (not action_trace.console.empty()) a->console = action_trace.console;
+
+   consume_action(a);
+
+   for (auto& inline_trace: action_trace.inline_traces) {
+      push_action(inline_trace, action_trace.receipt.global_sequence, tx);
+   }
+}
+
+void kafka::consume_block(BlockPtr block) {
+   auto payload = fc::json::to_string(*block, fc::json::legacy_generator);
+   Buffer buffer (block->id.data(), block->id.size());
+   producer_->produce(MessageBuilder(block_topic_).partition(partition_).key(buffer).payload(payload));
+}
+
+void kafka::consume_transaction(TransactionPtr tx) {
+   auto payload = fc::json::to_string(*tx, fc::json::legacy_generator);
+   Buffer buffer (tx->id.data(), tx->id.size());
+   producer_->produce(MessageBuilder(tx_topic_).partition(partition_).key(buffer).payload(payload));
+}
+
+void kafka::consume_transaction_trace(TransactionTracePtr tx_trace) {
+   auto payload = fc::json::to_string(*tx_trace, fc::json::legacy_generator);
+   Buffer buffer (tx_trace->id.data(), tx_trace->id.size());
+   producer_->produce(MessageBuilder(tx_trace_topic_).partition(partition_).key(buffer).payload(payload));
+}
+
+void kafka::consume_action(ActionPtr action) {
+   auto payload = fc::json::to_string(*action, fc::json::legacy_generator);
+   Buffer buffer((char*)&action->global_seq, sizeof(action->global_seq));
+   producer_->produce(MessageBuilder(action_topic_).partition(partition_).key(buffer).payload(payload));
+}
+
+}
diff --git a/plugins/kafka_plugin/kafka.hpp b/plugins/kafka_plugin/kafka.hpp
new file mode 100644
index 00000000000..5242ee872e7
--- /dev/null
+++ b/plugins/kafka_plugin/kafka.hpp
@@ -0,0 +1,46 @@
+#pragma once
+
+#include
+#include
+
+#include
+
+#include "types.hpp"
+
+namespace kafka {
+
+using namespace std;
+using namespace cppkafka;
+using namespace eosio;
+
+class kafka {
+public:
+   void set_config(Configuration config);
+   void set_topics(const string& block_topic, const string& tx_topic, const string& tx_trace_topic, const string& action_topic);
+   void set_partition(int partition);
+   void start();
+   void stop();
+
+   void push_block(const chain::block_state_ptr& block_state, bool irreversible);
+   std::pair<uint32_t, uint32_t> push_transaction(const chain::transaction_receipt& transaction_receipt, const BlockPtr& block, uint16_t block_seq);
+   void push_transaction_trace(const chain::transaction_trace_ptr& transaction_trace);
+   void push_action(const chain::action_trace& action_trace, uint64_t parent_seq, const TransactionTracePtr& tx);
+
+private:
+   void consume_block(BlockPtr block);
+   void consume_transaction(TransactionPtr tx);
+   void consume_transaction_trace(TransactionTracePtr tx_trace);
+   void consume_action(ActionPtr action);
+
+   Configuration config_;
+   string block_topic_;
+   string tx_topic_;
+   string tx_trace_topic_;
+   string action_topic_;
+
+   int partition_{-1};
+
+   std::unique_ptr<Producer> producer_;
+};
+
+}
diff --git a/plugins/kafka_plugin/kafka_plugin.cpp b/plugins/kafka_plugin/kafka_plugin.cpp
new file mode 100644
index 00000000000..901fd57a29e
--- /dev/null
+++ b/plugins/kafka_plugin/kafka_plugin.cpp
@@ -0,0 +1,166 @@
+#include "kafka_plugin.hpp"
+
+#include
+
+#include "kafka.hpp"
+#include "try_handle.hpp"
+
+namespace eosio {
+
+using namespace std;
+
+namespace bpo = boost::program_options;
+using bpo::options_description;
+using bpo::variables_map;
+
+using kafka::handle;
+
+enum class compression_codec {
+   none,
+   gzip,
+   snappy,
+   lz4
+};
+
+std::istream& operator>>(std::istream& in, compression_codec& codec) {
+   std::string s;
+   in >> s;
+   if (s == "none") codec = compression_codec::none;
+   else if (s == "gzip") codec = compression_codec::gzip;
+   else if (s == "snappy") codec = compression_codec::snappy;
+   else if (s == "lz4") codec = compression_codec::lz4;
+   else in.setstate(std::ios_base::failbit);
+   return in;
+}
+
+static appbase::abstract_plugin& _kafka_relay_plugin = app().register_plugin<kafka_plugin>();
+
+kafka_plugin::kafka_plugin() : kafka_(std::make_unique<kafka::kafka>()) {}
+kafka_plugin::~kafka_plugin() {}
+
+void kafka_plugin::set_program_options(options_description&, options_description& cfg) {
+   cfg.add_options()
+      ("kafka-enable", bpo::value<bool>(), "Enable the kafka plugin")
+      ("kafka-broker-list", bpo::value<string>()->default_value("127.0.0.1:9092"), "Kafka initial broker list, formatted as comma separated pairs of host or host:port, e.g., host1:port1,host2:port2")
+      ("kafka-block-topic", bpo::value<string>()->default_value("eos.blocks"), "Kafka topic for message `block`")
+      ("kafka-transaction-topic", bpo::value<string>()->default_value("eos.txs"), "Kafka topic for message `transaction`")
+      ("kafka-transaction-trace-topic", bpo::value<string>()->default_value("eos.txtraces"), "Kafka topic for message `transaction_trace`")
+      ("kafka-action-topic", bpo::value<string>()->default_value("eos.actions"), "Kafka topic for message `action`")
+      ("kafka-batch-num-messages", bpo::value<int>()->default_value(1024), "Minimum number of messages to accumulate in the local queue before sending a message set")
+      ("kafka-queue-buffering-max-ms", bpo::value<int>()->default_value(500), "How long (in ms) to wait for kafka-batch-num-messages to accumulate in the local queue")
+      ("kafka-compression-codec", bpo::value<compression_codec>()->value_name("none/gzip/snappy/lz4"), "Compression codec to use for message sets; default is snappy")
+      ("kafka-request-required-acks", bpo::value<int>()->default_value(1), "How many acknowledgements the leader broker must receive from ISR brokers before responding to the request: 0=the broker sends no response/ack to the client, 1=only the leader broker must ack the message, -1=the broker blocks until the message is committed by all in-sync replicas (ISRs) or the broker's min.insync.replicas setting is satisfied")
+      ("kafka-message-send-max-retries", bpo::value<int>()->default_value(2), "How many times to retry sending a failing message set")
+      ("kafka-start-block-num", bpo::value<unsigned>()->default_value(1), "The block number at which Kafka syncing starts")
+      ("kafka-statistics-interval-ms", bpo::value<int>()->default_value(0), "Statistics emit interval in ms; maximum is 86400000, 0 disables statistics")
+      ("kafka-fixed-partition", bpo::value<int>()->default_value(-1), "Fixed partition to use for all topics; -1 disables the fixed partition")
+      ;
+   // TODO: security options
+}
+
+void kafka_plugin::plugin_initialize(const variables_map& options) {
+   if (not options.count("kafka-enable") || not options.at("kafka-enable").as<bool>()) {
+      wlog("kafka_plugin disabled, since no --kafka-enable=true specified");
+      return;
+   }
+
+   ilog("Initialize kafka plugin");
+   configured_ = true;
+
+   string compressionCodec = "snappy";
+   if (options.count("kafka-compression-codec")) {
+      switch (options.at("kafka-compression-codec").as<compression_codec>()) {
+      case compression_codec::none:
+         compressionCodec = "none";
+         break;
+      case compression_codec::gzip:
+         compressionCodec = "gzip";
+         break;
+      case compression_codec::snappy:
+         compressionCodec = "snappy";
+         break;
+      case compression_codec::lz4:
+         compressionCodec = "lz4";
+         break;
+      }
+   }
+
+   kafka::Configuration config = {
+      {"metadata.broker.list", options.at("kafka-broker-list").as<string>()},
+      {"batch.num.messages", options.at("kafka-batch-num-messages").as<int>()},
+      {"queue.buffering.max.ms", options.at("kafka-queue-buffering-max-ms").as<int>()},
+      {"compression.codec", compressionCodec},
+      {"request.required.acks", options.at("kafka-request-required-acks").as<int>()},
+      {"message.send.max.retries", options.at("kafka-message-send-max-retries").as<int>()},
+      {"socket.keepalive.enable", true}
+   };
+   auto stats_interval = options.at("kafka-statistics-interval-ms").as<int>();
+   if (stats_interval > 0) {
+      config.set("statistics.interval.ms", stats_interval);
+      config.set_stats_callback([](kafka::KafkaHandleBase& handle, const std::string& json) {
+         ilog("kafka stats: ${json}", ("json", json));
+      });
+   }
+   kafka_->set_config(config);
+   kafka_->set_topics(
+      options.at("kafka-block-topic").as<string>(),
+      options.at("kafka-transaction-topic").as<string>(),
+      options.at("kafka-transaction-trace-topic").as<string>(),
+      options.at("kafka-action-topic").as<string>()
+   );
+
+   if (options.at("kafka-fixed-partition").as<int>() >= 0) {
+      kafka_->set_partition(options.at("kafka-fixed-partition").as<int>());
+   }
+
+   unsigned start_block_num = options.at("kafka-start-block-num").as<unsigned>();
+
+   // add callback to chain_controller config
+   chain_plugin_ = app().find_plugin<chain_plugin>();
+   auto& chain = chain_plugin_->chain();
+
+   block_conn_ = chain.accepted_block.connect([=](const chain::block_state_ptr& b) {
+      if (not start_sync_) {
+         if (b->block_num >= start_block_num) start_sync_ = true;
+         else return;
+      }
+      handle([=] { kafka_->push_block(b, false); }, "push block");
+   });
+   irreversible_block_conn_ = chain.irreversible_block.connect([=](const chain::block_state_ptr& b) {
+      if (not start_sync_) {
+         if (b->block_num >= start_block_num) start_sync_ = true;
+         else return;
+      }
+      handle([=] { kafka_->push_block(b, true); }, "push irreversible block");
+   });
+   transaction_conn_ = chain.applied_transaction.connect([=](const chain::transaction_trace_ptr& t) {
+      if (not start_sync_) return;
+      handle([=] { kafka_->push_transaction_trace(t); }, "push transaction");
+   });
+}
+
+void kafka_plugin::plugin_startup() {
+   if (not configured_) return;
+   ilog("Starting kafka_plugin");
+   kafka_->start();
+   ilog("Started kafka_plugin");
+}
+
+void kafka_plugin::plugin_shutdown() {
+   if (not configured_) return;
+   ilog("Stopping kafka_plugin");
+
+   try {
+      block_conn_.disconnect();
+      irreversible_block_conn_.disconnect();
+      transaction_conn_.disconnect();
+
+      kafka_->stop();
+   } catch (const std::exception& e) {
+      elog("Exception on kafka_plugin shutdown: ${e}", ("e", e.what()));
+   }
+
+   ilog("Stopped kafka_plugin");
+}
+
+}
diff --git a/plugins/kafka_plugin/kafka_plugin.hpp b/plugins/kafka_plugin/kafka_plugin.hpp
new file mode 100644
index 00000000000..0bc66108ad2
--- /dev/null
+++ b/plugins/kafka_plugin/kafka_plugin.hpp
@@ -0,0 +1,41 @@
+#pragma once
+
+#include
+#include
+
+namespace kafka {
+class kafka; // forward declaration
+}
+
+namespace eosio {
+
+using namespace appbase;
+
+class kafka_plugin : public appbase::plugin<kafka_plugin> {
+public:
+   APPBASE_PLUGIN_REQUIRES((chain_plugin))
+
+   kafka_plugin();
+   virtual ~kafka_plugin();
+
+   void set_program_options(options_description&, options_description& cfg) override;
+
+   void plugin_initialize(const variables_map& options);
+   void plugin_startup();
+   void plugin_shutdown();
+
+private:
+   bool configured_{};
+
+   chain_plugin* chain_plugin_{};
+
+   boost::signals2::connection block_conn_;
+   boost::signals2::connection irreversible_block_conn_;
+   boost::signals2::connection transaction_conn_;
+
+   std::atomic<bool> start_sync_{false};
+
+   std::unique_ptr<kafka::kafka> kafka_;
+};
+
+}
diff --git a/plugins/kafka_plugin/readme.md b/plugins/kafka_plugin/readme.md
new file mode 100644
index 00000000000..ecde9474053
--- /dev/null
+++ b/plugins/kafka_plugin/readme.md
@@ -0,0 +1,31 @@
+# Kafka Plugin Notes
+
+### 1. Topics
+
+The following four topics are enabled by default:
+
+ 1. blocks    // the `block` field is the JSON persistence of the full block data, i.e. a complete copy.
+ 2. transaction
+ 3. transaction_trace
+ 4. action
+
+ transaction, transaction_trace and action are parsed from nodeos data and carry the main fields likely to be used (a recommended configuration); consumers may add or remove fields as needed. Alternatively, these three topics can be dropped entirely, relying only on the full data in blocks.
+
+ See: `plugins/kafka_plugin/types.hpp`
+
+
+### 2. FAQ
+
+#### Common bos build error on Mac
+```
+Could not find a package configuration file provided by "RdKafka" with any of the following names:
+  RdKafkaConfig.cmake
+  rdkafka-config.cmake
+```
+
+Cause: the kafka version installed on the system is too old.
+
+Fix:
+
+ Delete the `/usr/local/include/cppkafka` and `/usr/local/include/librdkafka` directories
+ Restart the bos build (a compatible kafka version will be downloaded and installed automatically)
\ No newline at end of file
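For consumers of these topics, a minimal cppkafka client sketch follows; the broker address matches the plugin's default and the topic and group id are assumptions:

```cpp
// Minimal cppkafka consumer for the plugin's default `eos.blocks` topic.
// Broker, topic and group.id are assumptions; each payload is one
// JSON-serialized kafka::Block (see types.hpp).
#include <cppkafka/configuration.h>
#include <cppkafka/consumer.h>
#include <iostream>

int main() {
    cppkafka::Configuration config = {
        {"metadata.broker.list", "127.0.0.1:9092"},
        {"group.id", "bos-example"}
    };
    cppkafka::Consumer consumer(config);
    consumer.subscribe({"eos.blocks"});

    while (true) {
        cppkafka::Message msg = consumer.poll();
        if (!msg) continue;              // poll timed out
        if (msg.get_error()) continue;   // e.g. partition EOF
        std::cout << msg.get_payload() << std::endl;
    }
}
```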
diff --git a/plugins/kafka_plugin/try_handle.cpp b/plugins/kafka_plugin/try_handle.cpp
new file mode 100644
index 00000000000..e9be213b652
--- /dev/null
+++ b/plugins/kafka_plugin/try_handle.cpp
@@ -0,0 +1,17 @@
+#include "try_handle.hpp"
+
+namespace kafka {
+
+void handle(std::function<void()> handler, const std::string& desc) {
+   try {
+      handler();
+   } catch (fc::exception& e) {
+      elog("FC Exception while ${desc}: ${e}", ("e", e.to_string())("desc", desc));
+   } catch (std::exception& e) {
+      elog("STD Exception while ${desc}: ${e}", ("e", e.what())("desc", desc));
+   } catch (...) {
+      elog("Unknown exception while ${desc}", ("desc", desc));
+   }
+}
+
+}
diff --git a/plugins/kafka_plugin/try_handle.hpp b/plugins/kafka_plugin/try_handle.hpp
new file mode 100644
index 00000000000..7d059e34dcc
--- /dev/null
+++ b/plugins/kafka_plugin/try_handle.hpp
@@ -0,0 +1,9 @@
+#pragma once
+
+#include
+
+namespace kafka {
+
+void handle(std::function<void()> handler, const std::string& desc);
+
+}
diff --git a/plugins/kafka_plugin/types.hpp b/plugins/kafka_plugin/types.hpp
new file mode 100644
index 00000000000..65f4091fb1f
--- /dev/null
+++ b/plugins/kafka_plugin/types.hpp
@@ -0,0 +1,93 @@
+#pragma once
+
+#include
+
+namespace kafka {
+
+using name_t = uint64_t;
+using std::string;
+using bytes = std::vector<char>;
+using eosio::chain::block_timestamp_type;
+
+struct Block {
+   bytes id;
+   unsigned num;
+
+   block_timestamp_type timestamp;
+
+   bool lib; // whether irreversible
+
+   bytes block;
+
+   uint32_t tx_count{};
+   uint32_t action_count{};
+   uint32_t context_free_action_count{};
+};
+
+struct Transaction {
+   bytes id;
+
+   bytes block_id;
+   uint32_t block_num;
+   block_timestamp_type block_time;
+
+   uint16_t block_seq; // the sequence number of this transaction in its block
+
+   uint32_t action_count{};
+   uint32_t context_free_action_count{};
+};
+
+enum TransactionStatus {
+   executed, soft_fail, hard_fail, delayed, expired, unknown
+};
+
+struct TransactionTrace { // new ones will override old ones, typically when status is changed
+   bytes id;
+
+   uint32_t block_num;
+
+   bool scheduled;
+
+   TransactionStatus status;
+   unsigned net_usage_words;
+   uint32_t cpu_usage_us;
+
+   string exception;
+};
+
+struct Action {
+   uint64_t global_seq; // the global sequence number of this action
+   uint64_t recv_seq; // the sequence number of this action for this receiver
+
+   uint64_t parent_seq; // parent action trace global sequence number, only for inline traces
+
+   name_t account; // account name
+   name_t name; // action name
+   bytes auth; // binary serialization of authorization array of permission_level
+   bytes data; // payload
+
+   name_t receiver; // where this action is executed on; may not be equal with `account_`, such as from notification
+
+   bytes auth_seq;
+   unsigned code_seq;
+   unsigned abi_seq;
+
+   uint32_t block_num;
+   block_timestamp_type block_time;
+   bytes tx_id; // the transaction that generated this action
+
+   string console;
+};
+
+using BlockPtr = std::shared_ptr<Block>;
+using TransactionPtr = std::shared_ptr<Transaction>;
+using TransactionTracePtr = std::shared_ptr<TransactionTrace>;
+using ActionPtr = std::shared_ptr<Action>;
+
+}
+
+FC_REFLECT_ENUM(kafka::TransactionStatus, (executed)(soft_fail)(hard_fail)(delayed)(expired)(unknown))
+
+FC_REFLECT(kafka::Block, (id)(num)(timestamp)(lib)(block)(tx_count)(action_count)(context_free_action_count))
+FC_REFLECT(kafka::Transaction, (id)(block_id)(block_num)(block_time)(block_seq)(action_count)(context_free_action_count))
+FC_REFLECT(kafka::TransactionTrace, (id)(block_num)(scheduled)(status)(net_usage_words)(cpu_usage_us)(exception))
+FC_REFLECT(kafka::Action, (global_seq)(recv_seq)(parent_seq)(account)(name)(auth)(data)(receiver)(auth_seq)(code_seq)(abi_seq)(block_num)(block_time)(tx_id)(console))
diff --git a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp
index a736a9ff464..bbdd357eec7 100644
--- a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp
+++ b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp
@@ -132,6 +132,14 @@ namespace eosio {
      uint32_t end_block;
   };

+   struct request_p2p_message {
+      bool discoverable;
+   };
+
+   struct response_p2p_message {
+      bool discoverable;
+      string p2p_peer_list;
+   };
+
   using net_message = static_variant<handshake_message,
                                      chain_size_message,
                                      go_away_message,
                                      time_message,
                                      notice_message,
                                      request_message,
                                      sync_request_message,
                                      signed_block,
-                                     packed_transaction>;
+                                     packed_transaction,
+                                     response_p2p_message,
+                                     request_p2p_message>;

} // namespace eosio
@@ -159,7 +169,8 @@ FC_REFLECT( eosio::time_message, (org)(rec)(xmt)(dst) )
FC_REFLECT( eosio::notice_message, (known_trx)(known_blocks) )
FC_REFLECT( eosio::request_message, (req_trx)(req_blocks) )
FC_REFLECT( eosio::sync_request_message, (start_block)(end_block) )
-
+FC_REFLECT( eosio::request_p2p_message, (discoverable) )
+FC_REFLECT( eosio::response_p2p_message, (discoverable)(p2p_peer_list) )

/**
 * Goals of Network Code
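`response_p2p_message` carries the peer list as a single `'#'`-delimited string, which the net_plugin handler later in this patch splits back into addresses. Illustrative encode/decode helpers — the function names are ours, not the plugin's:

```cpp
// Helpers mirroring the '#'-delimited peer-list encoding used by
// response_p2p_message; names are illustrative, not from the patch.
#include <string>
#include <vector>

std::string encode_peer_list(const std::vector<std::string>& peers) {
    std::string out;
    for (const auto& p : peers) out += p + "#";   // "host:port#host:port#..."
    return out;
}

std::vector<std::string> decode_peer_list(const std::string& list) {
    std::vector<std::string> peers;
    std::string::size_type start = 0, idx;
    while ((idx = list.find('#', start)) != std::string::npos) {
        if (idx > start) peers.push_back(list.substr(start, idx - start));
        start = idx + 1;
    }
    return peers;
}
```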
diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 9ee380b666f..8283c5c9320 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -118,6 +118,13 @@ namespace eosio {
      > node_transaction_index;

+   struct p2p_peer_record {
+      string peer_address;
+      time_point_sec expiry;
+      bool is_config;
+      bool discoverable;
+      bool connected;
+   };
+
   class net_plugin_impl {
   public:
      unique_ptr<tcp::acceptor> acceptor;
@@ -128,6 +135,9 @@ namespace eosio {
      uint32_t num_clients = 0;

      vector<string> supplied_peers;
+      map<string, p2p_peer_record> p2p_peer_records;
+      bool p2p_discoverable;
+      bool request_p2p_flag = true;
      vector<chain::public_key_type> allowed_peers; ///< peer keys allowed to connect
      std::map<chain::public_key_type, chain::private_key_type> private_keys; ///< overlapping with producer keys, also authenticating non-producing nodes
@@ -197,6 +207,8 @@ namespace eosio {

      bool is_valid( const handshake_message &msg);

+      void send_p2p_request(connection_ptr c);
+
      void handle_message( connection_ptr c, const handshake_message &msg);
      void handle_message( connection_ptr c, const chain_size_message &msg);
      void handle_message( connection_ptr c, const go_away_message &msg );
@@ -220,6 +232,8 @@ namespace eosio {
      void handle_message( connection_ptr c, const sync_request_message &msg);
      void handle_message( connection_ptr c, const signed_block &msg);
      void handle_message( connection_ptr c, const packed_transaction &msg);
+      void handle_message( connection_ptr c, const request_p2p_message &msg);
+      void handle_message( connection_ptr c, const response_p2p_message &msg);

      void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr<connection> from_connection);
      void start_txn_timer( );
@@ -575,6 +589,8 @@ namespace eosio {
                       std::function<void(boost::system::error_code, std::size_t)> callback);
      void do_queue_write();

+      void send_p2p_request(bool discoverable);
+      void send_p2p_response(bool discoverable, string p2p_peer_list);

      /** \brief Process the next message from the pending message buffer
       *
       * Process the next message from the pending_message_buffer.
@@ -942,6 +958,29 @@ namespace eosio {
   }

+   void connection::send_p2p_request(bool discoverable)
+   {
+      try
+      {
+         enqueue(net_message(request_p2p_message{discoverable}));
+      }
+      catch (...)
+      {
+         elog("failed to send request_p2p_message");
+      }
+   }
+
+   void connection::send_p2p_response(bool discoverable, string p2p_list)
+   {
+      try
+      {
+         enqueue(net_message(response_p2p_message{discoverable, p2p_list}));
+      }
+      catch (...)
+      {
+         elog("failed to send response_p2p_message");
+      }
+   }
+
   void connection::stop_send() {
      syncing = false;
   }
@@ -1952,7 +1991,8 @@ namespace eosio {
         if( !err && c->socket->is_open() ) {
            if (start_session( c )) {
               c->send_handshake ();
-            }
+               send_p2p_request(c);
+            }
         } else {
            if( endpoint_itr != tcp::resolver::iterator() ) {
               close(c);
@@ -1968,6 +2008,37 @@ namespace eosio {
      } );
   }

+   void net_plugin_impl::send_p2p_request(connection_ptr c)
+   {
+      if (p2p_discoverable && request_p2p_flag)
+      {
+         auto peer_record = p2p_peer_records.find(c->peer_addr);
+         if (peer_record != p2p_peer_records.end())
+         {
+            if (peer_record->second.is_config && !peer_record->second.connected)
+            {
+               c->send_p2p_request(p2p_discoverable);
+               peer_record->second.connected = true;
+            }
+            else
+            {
+               bool stop_flag = true;
+               for (auto record : p2p_peer_records)
+               {
+                  if (record.second.is_config && !(record.second.connected || record.second.expiry < time_point::now()))
+                  {
+                     stop_flag = false;
+                     break;
+                  }
+               }
+               if (stop_flag)
+               {
+                  request_p2p_flag = false;
+               }
+            }
+         }
+      }
+   }
+
   bool net_plugin_impl::start_session( connection_ptr con ) {
      boost::asio::ip::tcp::no_delay nodelay( true );
      boost::system::error_code ec;
@@ -2217,6 +2288,56 @@ namespace eosio {
   }

+   void net_plugin_impl::handle_message( connection_ptr c, const request_p2p_message &msg) {
+      peer_ilog(c, "received request_p2p_message");
+      string rspm;
+      for (auto sd : p2p_peer_records) {
+         if (sd.second.discoverable) {
+            rspm.append(sd.second.peer_address + "#");
+         }
+      }
+      if (p2p_discoverable || rspm.size() > 0) {
+         c->send_p2p_response(p2p_discoverable, rspm);
+      }
+   }
+
+   void net_plugin_impl::handle_message( connection_ptr c, const response_p2p_message &msg) {
+      peer_ilog(c, "received response_p2p_message");
+      auto peer_record = p2p_peer_records.find(c->peer_addr);
+      if (peer_record != p2p_peer_records.end()) {
+         peer_record->second.discoverable = msg.discoverable;
+         if (peer_record->second.is_config && msg.p2p_peer_list.length() > 0) {
+            vector<string> p2p_peer_list;
+            string::size_type start = 0;
+            string delim = "#";
+            auto idx = msg.p2p_peer_list.find(delim, start);
+            string peer_list;
+            while( idx != std::string::npos )
+            {
+               if (max_nodes_per_host <= connections.size() || max_nodes_per_host <= p2p_peer_records.size()) {
+                  return;
+               }
+               peer_list = msg.p2p_peer_list.substr(start, idx - start);
+               if (peer_list.size() < 3) {
+                  break;
+               }
+               start = idx + delim.size();
+               idx = msg.p2p_peer_list.find(delim, start);
+               if( find_connection( peer_list ))
+                  continue;
+               p2p_peer_record p2prcd;
+               p2prcd.peer_address = peer_list;
+               p2prcd.discoverable = false;
+               p2prcd.is_config = true;
+               p2prcd.connected = false;
+               p2p_peer_records.insert(pair<string, p2p_peer_record>(peer_list, p2prcd));
+               connection_ptr c = std::make_shared<connection>(peer_list);
+               fc_dlog(logger, "adding new connection to the list");
+               connections.insert( c );
+            }
+         }
+      }
+   }
+
   void net_plugin_impl::handle_message( connection_ptr c, const handshake_message &msg) {
      peer_ilog(c, "received handshake_message");
      if (!is_valid(msg)) {
@@ -2879,6 +3000,8 @@ namespace eosio {
         ( "sync-fetch-span", bpo::value<uint32_t>()->default_value(def_sync_fetch_span), "number of blocks to retrieve in a chunk from any individual peer during synchronization")
         ( "max-implicit-request", bpo::value<uint32_t>()->default_value(def_max_just_send), "maximum sizes of transaction or block messages that are sent without first sending a notice")
         ( "use-socket-read-watermark", bpo::value<bool>()->default_value(false), "Enable expirimental socket read watermark optimization")
+        ( "p2p-discoverable", bpo::value<bool>()->default_value(false),
+          "Enable p2p discovery so that this node can be discovered by its peers.")
         ( "peer-log-format", bpo::value<string>()->default_value( "[\"${_name}\" ${_ip}:${_port}]" ),
           "The string used to format peers when logging messages about them.  Variables are escaped with ${}.\n"
           "Available Variables:\n"
@@ -2919,6 +3042,8 @@ namespace eosio {

      my->use_socket_read_watermark = options.at( "use-socket-read-watermark" ).as<bool>();

+      my->p2p_discoverable = options.at( "p2p-discoverable" ).as<bool>();
+
      my->resolver = std::make_shared<tcp::resolver>( std::ref( app().get_io_service()));
      if( options.count( "p2p-listen-endpoint" )) {
         my->p2p_address = options.at( "p2p-listen-endpoint" ).as<string>();
@@ -3037,6 +3162,14 @@ namespace eosio {
      my->start_monitors();

      for( auto seed_node : my->supplied_peers ) {
+         p2p_peer_record p2prcd;
+         p2prcd.peer_address = seed_node;
+         p2prcd.discoverable = false;
+         p2prcd.is_config = true;
+         p2prcd.connected = false;
+         p2prcd.expiry = time_point_sec((time_point::now()).sec_since_epoch() + 10);
+         my->p2p_peer_records.insert(pair<string, p2p_peer_record>(seed_node, p2prcd));
+
         connect( seed_node );
      }
diff --git a/plugins/notify_plugin/CMakeLists.txt b/plugins/notify_plugin/CMakeLists.txt
new file mode 100644
index 00000000000..ea3d85eea9a
--- /dev/null
+++ b/plugins/notify_plugin/CMakeLists.txt
@@ -0,0 +1,7 @@
+file(GLOB HEADERS "include/eosio/notify_plugin/*.hpp")
+add_library( notify_plugin
+             notify_plugin.cpp
+             ${HEADERS} include/eosio/notify_plugin/notify_plugin.hpp)
+
+target_link_libraries( notify_plugin chain_plugin eosio_chain appbase fc )
+target_include_directories( notify_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" )
\ No newline at end of file
diff --git a/plugins/notify_plugin/README.md b/plugins/notify_plugin/README.md
new file mode 100644
index 00000000000..b962c6e0ead
--- /dev/null
+++ b/plugins/notify_plugin/README.md
@@ -0,0 +1,73 @@
+# notify_plugin
+
+Sends real-time on-chain actions to a `receive_url`, which you can use to build notifications.
+
+### Usage
+
+Add the following settings to your `config.ini`:
+
+```
+## Notify Plugin
+plugin = eosio::notify_plugin
+# notify-filter-on = account:action
+notify-filter-on = b1:
+notify-filter-on = b1:transfer
+notify-filter-on = eosio:delegatebw
+# HTTP endpoint for each action seen on the chain.
+notify-receive-url = http://127.0.0.1:8080/notify
+# Age limit in seconds for blocks to send notifications. No age limit if set to negative.
+# Prevents old actions from triggering HTTP requests during replay (seconds).
+notify-age-limit = -1
+# Number of retries if sending an HTTP notification fails.
+notify-retry-times = 3
+```
+
+You can then receive on-chain actions by watching your server endpoint `http://127.0.0.1:8080/notify`; the data sent to the endpoint looks like:
+
+```json
+{
+  "irreversible": true,
+  "actions": [{
+      "tx_id": "b31885bada6c2d5e71b1302e87d4006c59ff2a40a12108559d76142548d8cf79",
+      "account": "eosio.token",
+      "name": "transfer",
+      "seq_num": 1,
+      "receiver": "b1",
+      "block_time": "2018-09-29T11:51:06.000",
+      "block_num": 127225,
+      "authorization": [{
+          "actor": "b1",
+          "permission": "active"
+        }
+      ],
+      "action_data": {
+        "from": "b1",
+        "to": "b11",
+        "quantity": "0.0001 EOS",
+        "memo": "Transfer from b1 to b11"
+      }
+    },{
+      "tx_id": "b31885bada6c2d5e71b1302e87d4006c59ff2a40a12108559d76142548d8cf79",
+      "account": "eosio.token",
+      "name": "transfer",
+      "seq_num": 2,
+      "receiver": "b11",
+      "block_time": "2018-09-29T11:51:06.000",
+      "block_num": 127225,
+      "authorization": [{
+          "actor": "b1",
+          "permission": "active"
+        }
+      ],
+      "action_data": {
+        "from": "b1",
+        "to": "b11",
+        "quantity": "0.0001 EOS",
+        "memo": "Transfer from b1 to b11"
+      }
+    }
+  ]
+}
+```
+
+On the server side you can use these actions for many purposes, such as building a Telegram alert bot that lets you subscribe to an account's on-chain activity.
\ No newline at end of file
diff --git a/plugins/notify_plugin/include/eosio/notify_plugin/http_async_client.hpp b/plugins/notify_plugin/include/eosio/notify_plugin/http_async_client.hpp
new file mode 100644
index 00000000000..cfc8f38a294
--- /dev/null
+++ b/plugins/notify_plugin/include/eosio/notify_plugin/http_async_client.hpp
@@ -0,0 +1,104 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace eosio
+{
+using namespace fc;
+namespace asio = boost::asio;
+
+template <typename F>
+struct final_action
+{
+  final_action(F f) : clean{f} {}
+  ~final_action() { clean(); }
+
+private:
+  F clean;
+};
+
+template <typename F>
+final_action<F> finally(F f)
+{
+  return final_action<F>(f);
+}
+
+class http_async_client
+{
+public:
+  http_async_client() : sync_client(std::make_unique<http_client>()),
+                        work_guard(asio::make_work_guard(ioc)) {}
+
+  ~http_async_client()
+  {
+    work_guard.reset();
+  }
+
+  void start()
+  {
+    worker = std::make_unique<std::thread>([this]() {
+      ioc.run();
+    });
+  }
+
+  void stop()
+  {
+    work_guard.reset();
+    worker->join();
+  }
+
+  void set_default_retry_times(int64_t t) {
+    default_retry_times = t;
+  }
+
+  template <typename T>
+  void post(const url &dest, const T &payload, const time_point &deadline = time_point::maximum())
+  {
+    asio::post(ioc.get_executor(), [this, dest, payload, deadline]() {
+      post_sync(dest, payload, deadline);
+    });
+  }
+
+private:
+  template <typename T>
+  void post_sync(const url &dest, const T &payload,
+                 const time_point &deadline = time_point::maximum())
+  {
+    auto exit = finally([this]() {
+      retry_times = default_retry_times;
+    });
+
+    try
+    {
+      sync_client->post_sync(dest, payload, deadline);
+    }
+    catch (const fc::eof_exception &exc)
+    {
+    }
+    catch (const fc::assert_exception &exc)
+    {
+      wlog("Exception while trying to send: ${exc}", ("exc", exc.to_detail_string()));
+      if (retry_times > 0)
+      {
+        wlog("Trying ${t} times: ", ("t", retry_times));
+        retry_times--;
+        post_sync(dest, payload, deadline);
+      }
+    }
+    FC_CAPTURE_AND_LOG((dest)(payload)(deadline))
+  };
+
+  std::unique_ptr<http_client> sync_client;
+  std::unique_ptr<std::thread> worker;
+  asio::io_context ioc;
+  asio::executor_work_guard<asio::io_context::executor_type> work_guard;
+  int64_t default_retry_times = 3;
+  int64_t retry_times = default_retry_times;
+};
+} // namespace eosio
\ No newline at end of file
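A minimal usage sketch of the `http_async_client` above; the endpoint URL and the payload type are assumptions, and any FC_REFLECT'ed type should work as payload:

```cpp
// Sketch: posting an fc-reflected payload with http_async_client.
// Endpoint and payload struct are assumptions for illustration.
#include <eosio/notify_plugin/http_async_client.hpp>
#include <fc/time.hpp>
#include <string>

struct ping { std::string note; };
FC_REFLECT(ping, (note))

int main() {
    eosio::http_async_client client;
    client.set_default_retry_times(3);
    client.start();                       // spins up the io_context worker thread

    client.post(fc::url("http://127.0.0.1:8080/notify"),
                ping{"hello"},
                fc::time_point::now() + fc::seconds(10));

    client.stop();                        // releases the work guard, joins the worker
    return 0;
}
```

Note the design: `post` only schedules work on the internal `io_context`, so the caller (here, the chain thread in the plugin) never blocks on the HTTP round trip.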
diff --git a/plugins/notify_plugin/include/eosio/notify_plugin/notify_plugin.hpp b/plugins/notify_plugin/include/eosio/notify_plugin/notify_plugin.hpp
new file mode 100644
index 00000000000..e2a23a3a74a
--- /dev/null
+++ b/plugins/notify_plugin/include/eosio/notify_plugin/notify_plugin.hpp
@@ -0,0 +1,33 @@
+/**
+ * @file
+ * @copyright eospace in eos/LICENSE.txt
+ */
+#pragma once
+#include
+#include
+
+namespace eosio {
+
+using namespace appbase;
+using notify_plugin_ptr = std::unique_ptr<class notify_plugin_impl>;
+
+/**
+ * notify_plugin: sends notifications of on-chain actions to external apps.
+ */
+class notify_plugin : public appbase::plugin<notify_plugin> {
+public:
+   notify_plugin();
+   virtual ~notify_plugin();
+
+   APPBASE_PLUGIN_REQUIRES((chain_plugin))
+   virtual void set_program_options(options_description&, options_description& cfg) override;
+
+   void plugin_initialize(const variables_map& options);
+   void plugin_startup();
+   void plugin_shutdown();
+
+private:
+   notify_plugin_ptr my;
+};
+
+}
diff --git a/plugins/notify_plugin/notify_plugin.cpp b/plugins/notify_plugin/notify_plugin.cpp
new file mode 100644
index 00000000000..377daf8e5ef
--- /dev/null
+++ b/plugins/notify_plugin/notify_plugin.cpp
@@ -0,0 +1,359 @@
+/**
+ * @file
+ * @copyright defined in eos/LICENSE.txt
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+
+#include
+
+namespace eosio
+{
+static appbase::abstract_plugin &_notify_plugin = app().register_plugin<notify_plugin>();
+using namespace chain;
+typedef uint32_t action_seq_type;
+
+class notify_plugin_impl
+{
+public:
+  static const int64_t default_age_limit = 60;
+  static const int64_t default_retry_times = 3;
+  static const fc::microseconds http_timeout;
+  static const fc::microseconds max_deserialization_time;
+
+  fc::url receive_url;
+  int64_t age_limit = default_age_limit;
+  int64_t retry_times = default_retry_times;
+  http_async_client httpc;
+
+  struct sequenced_action : public action
+  {
+    sequenced_action(const action &act, action_seq_type seq, account_name receiver)
+        : action(act), seq_num(seq), receiver(receiver) {}
+
+    action_seq_type seq_num;
+    account_name receiver;
+  };
+
+  struct action_notify
+  {
+    action_notify(const sequenced_action &act, transaction_id_type tx_id,
+                  const variant &action_data, fc::time_point block_time,
+                  uint32_t block_num)
+        : tx_id(tx_id), account(act.account), name(act.name), receiver(act.receiver),
+          seq_num(act.seq_num), block_time(block_time), block_num(block_num),
+          authorization(act.authorization), action_data(action_data) {}
+
+    transaction_id_type tx_id;
+    account_name account;
+    account_name name;
+    account_name receiver;
+    action_seq_type seq_num;
+    fc::time_point block_time;
+    uint32_t block_num;
+
+    vector<permission_level> authorization;
+    fc::variant action_data;
+  };
+
+  struct message
+  {
+    message() : irreversible(false), actions() {}
+    bool irreversible;
+    std::vector<action_notify> actions;
+  };
+
+  struct filter_entry
+  {
+    name receiver;
+    name action;
+
+    std::tuple<name, name> key() const
+    {
+      return std::make_tuple(receiver, action);
+    }
+
+    friend bool operator<(const filter_entry &a, const filter_entry &b)
+    {
+      return a.key() < b.key();
+    }
+  };
+
+  typedef std::unordered_multimap<transaction_id_type, sequenced_action> action_queue_type;
+
+  chain_plugin *chain_plug = nullptr;
+  std::set<filter_entry> filter_on;
+  fc::optional<boost::signals2::scoped_connection> accepted_block_conn;
+  fc::optional<boost::signals2::scoped_connection> irreversible_block_conn;
+  fc::optional<boost::signals2::scoped_connection> applied_tx_conn;
+  action_queue_type action_queue;
+  action_queue_type irreversible_action_queue;
+
+  bool filter(const action_trace &act);
+  fc::variant deserialize_action_data(action act);
+  void build_message(message &msg, const block_state_ptr &block, const transaction_id_type &tx_id, bool irreversible);
+  void send_message(const message &msg);
+  action_seq_type on_action_trace(const action_trace &act, const transaction_id_type &tx_id, action_seq_type act_s);
+  void on_applied_tx(const transaction_trace_ptr &trace);
+  void on_accepted_block(const block_state_ptr &block_state);
+  void on_irreversible_block(const block_state_ptr &block_state);
+};
+
+bool notify_plugin_impl::filter(const action_trace &act)
+{
+  if (filter_on.find({act.receipt.receiver, act.act.name}) != filter_on.end())
+  {
+    return true;
+  }
+  else if (filter_on.find({act.receipt.receiver, 0}) != filter_on.end())
+  {
+    return true;
+  }
+  return false;
+}
+
+fc::variant notify_plugin_impl::deserialize_action_data(action act)
+{
+  auto &chain = chain_plug->chain();
+  auto serializer = chain.get_abi_serializer(act.account, max_deserialization_time);
+  FC_ASSERT(serializer.valid() && serializer->get_action_type(act.name) != action_name(),
+            "Unable to get abi for account: ${acc}, action: ${a}. Not sending notification.",
+            ("acc", act.account)("a", act.name));
+  return serializer->binary_to_variant(act.name.to_string(), act.data, max_deserialization_time);
+}
+
+void notify_plugin_impl::build_message(message &msg, const block_state_ptr &block, const transaction_id_type &tx_id, const bool irreversible)
+{
+  // dlog("irreversible: ${a}", ("a", fc::json::to_pretty_string(irreversible)));
+  auto range = irreversible ? irreversible_action_queue.equal_range(tx_id) : action_queue.equal_range(tx_id);
+
+  msg.irreversible = irreversible;
+  for (auto &it = range.first; it != range.second; it++)
+  {
+    auto act_data = deserialize_action_data(it->second);
+    action_notify notify(it->second, tx_id, std::move(act_data),
+                         block->block->timestamp, block->block->block_num());
+    msg.actions.push_back(notify);
+  }
+}
+
+void notify_plugin_impl::send_message(const message &msg)
+{
+  try
+  {
+    httpc.post(receive_url, msg, fc::time_point::now() + http_timeout);
+  }
+  FC_CAPTURE_AND_LOG(("Error while sending notification")(msg));
+}
+
+action_seq_type notify_plugin_impl::on_action_trace(const action_trace &act, const transaction_id_type &tx_id,
+                                                    action_seq_type act_s)
+{
+  if (filter(act))
+  {
+    const auto pair = std::make_pair(tx_id, sequenced_action(act.act, act_s, act.receipt.receiver));
+    action_queue.insert(pair);
+    irreversible_action_queue.insert(pair);
+    // dlog("on_action_trace: ${a}", ("a", fc::json::to_pretty_string(act.act)));
+  }
+  act_s++;
+
+  for (const auto &iline : act.inline_traces)
+  {
+    act_s = on_action_trace(iline, tx_id, act_s);
+  }
+  return act_s;
+}
+
+void notify_plugin_impl::on_applied_tx(const transaction_trace_ptr &trace)
+{
+  auto id = trace->id;
+
+  if (!action_queue.count(id) || !irreversible_action_queue.count(id))
+  {
+    action_seq_type seq = 0;
+    for (auto &at : trace->action_traces)
+    {
+      seq = on_action_trace(at, id, seq);
+    }
+  }
+}
+
+void notify_plugin_impl::on_accepted_block(const block_state_ptr &block_state)
+{
+  fc::time_point block_time = block_state->block->timestamp;
+
+  if (age_limit == -1 || (fc::time_point::now() - block_time < fc::seconds(age_limit)))
+  {
+    message msg;
+    transaction_id_type tx_id;
+    // dlog("block_state->block->transactions: ${a}", ("a", fc::json::to_pretty_string(block_state->block->transactions)));
+    for (const auto &trx : block_state->block->transactions)
+    {
+      if (trx.trx.contains<transaction_id_type>())
+      {
+        tx_id = trx.trx.get<transaction_id_type>();
+      }
+      else
+      {
+        tx_id = trx.trx.get<packed_transaction>().id();
+      }
+      // dlog("tx_id: ${a}", ("a", fc::json::to_pretty_string(tx_id)));
+      // dlog("action_queue.size(): ${a}", ("a", fc::json::to_pretty_string(action_queue.size())));
+      if (action_queue.count(tx_id))
+      {
+        build_message(msg, block_state, tx_id, false);
+      }
+    }
+    // dlog("msg: ${a}", ("a", msg));
+    if (msg.actions.size() > 0)
+    {
+      send_message(msg);
+    }
+  }
+  action_queue.clear();
+}
+
+void notify_plugin_impl::on_irreversible_block(const block_state_ptr &block_state)
+{
+  fc::time_point block_time = block_state->block->timestamp;
+  if (age_limit == -1 || (fc::time_point::now() - block_time < fc::seconds(age_limit)))
+  {
+    message msg;
+    transaction_id_type tx_id;
+    // dlog("block_state->block->transactions: ${a}", ("a", fc::json::to_pretty_string(block_state->block->transactions)));
+    for (const auto &trx : block_state->block->transactions)
+    {
+      if (trx.trx.contains<transaction_id_type>())
+      {
+        tx_id = trx.trx.get<transaction_id_type>();
+      }
+      else
+      {
+        tx_id = trx.trx.get<packed_transaction>().id();
+      }
+      // dlog("tx_id: ${a}", ("a", fc::json::to_pretty_string(tx_id)));
+      // dlog("irreversible_action_queue.size(): ${a}", ("a", fc::json::to_pretty_string(irreversible_action_queue.size())));
+      if (irreversible_action_queue.count(tx_id))
+      {
+        build_message(msg, block_state, tx_id, true);
+      }
+    }
+    // dlog("msg: ${a}", ("a", msg));
+    if (msg.actions.size() > 0)
+    {
+      send_message(msg);
+      irreversible_action_queue.clear();
+    }
+  }
+}
+
+const fc::microseconds notify_plugin_impl::http_timeout = fc::seconds(10);
+const fc::microseconds notify_plugin_impl::max_deserialization_time = fc::seconds(5);
+const int64_t notify_plugin_impl::default_age_limit;
+const int64_t notify_plugin_impl::default_retry_times;
+
+notify_plugin::notify_plugin() : my(new notify_plugin_impl()) {}
+notify_plugin::~notify_plugin() {}
+
+void notify_plugin::set_program_options(options_description &, options_description &cfg)
+{
+  cfg.add_options()("notify-filter-on", bpo::value<vector<string>>()->composing(),
+                    "Track actions and send notifications when they match receiver:action. If action is not specified, "
+                    "all actions to the specified account are tracked.")
+      ("notify-receive-url", bpo::value<string>(), "Notify URL that receives the notifications")
+      ("notify-age-limit", bpo::value<int64_t>()->default_value(notify_plugin_impl::default_age_limit),
+       "Age limit in seconds for blocks to send notifications about."
+       " No age limit if this is set to negative.")
+      ("notify-retry-times", bpo::value<int64_t>()->default_value(notify_plugin_impl::default_retry_times),
+       "Number of retries if sending an HTTP notification fails.")
+      ;
+}
+
+void notify_plugin::plugin_initialize(const variables_map &options)
+{
+  try
+  {
+    EOS_ASSERT(options.count("notify-receive-url") == 1, fc::invalid_arg_exception,
+               "notify_plugin requires one notify-receive-url to be specified!");
+
+    EOS_ASSERT(options.count("notify-age-limit") == 1, fc::invalid_arg_exception,
+               "notify_plugin requires one notify-age-limit to be specified!");
+
+    EOS_ASSERT(options.count("notify-retry-times") == 1, fc::invalid_arg_exception,
+               "notify_plugin requires one notify-retry-times to be specified!");
+
+    string url_str = options.at("notify-receive-url").as<string>();
+    my->receive_url = fc::url(url_str);
+
+    if (options.count("notify-filter-on"))
+    {
+      auto fo = options.at("notify-filter-on").as<vector<string>>();
+      for (auto &s : fo)
+      {
+        std::vector<string> v;
+        boost::split(v, s, boost::is_any_of(":"));
+        EOS_ASSERT(v.size() == 2, fc::invalid_arg_exception,
+                   "Invalid value ${s} for --notify-filter-on",
+                   ("s", s));
+        notify_plugin_impl::filter_entry fe{v[0], v[1]};
+        EOS_ASSERT(fe.receiver.value, fc::invalid_arg_exception, "Invalid value ${s} for --notify-filter-on", ("s", s));
+        my->filter_on.insert(fe);
+      }
+    }
+
+    if (options.count("notify-age-limit"))
+      my->age_limit = options.at("notify-age-limit").as<int64_t>();
+
+    if (options.count("notify-retry-times"))
+      my->retry_times = options.at("notify-retry-times").as<int64_t>();
+
+    my->httpc.set_default_retry_times(my->retry_times);
+    my->chain_plug = app().find_plugin<chain_plugin>();
+    auto &chain = my->chain_plug->chain();
+    my->accepted_block_conn.emplace(chain.accepted_block.connect(
+        [&](const block_state_ptr &b_state) {
+          my->on_accepted_block(b_state);
+        }));
+
+    my->irreversible_block_conn.emplace(chain.irreversible_block.connect(
+        [&](const block_state_ptr &bs) {
+          my->on_irreversible_block(bs);
+        }));
+
+    my->applied_tx_conn.emplace(chain.applied_transaction.connect(
+        [&](const transaction_trace_ptr &tx) {
+          my->on_applied_tx(tx);
+        }));
+  }
+  FC_LOG_AND_RETHROW()
+}
+
+void notify_plugin::plugin_startup()
+{
+  ilog("Notify plugin started");
+  my->httpc.start();
+}
+
+void notify_plugin::plugin_shutdown()
+{
+  my->applied_tx_conn.reset();
+  my->accepted_block_conn.reset();
+  my->irreversible_block_conn.reset();
+  my->httpc.stop();
+}
+} // namespace eosio
+
+FC_REFLECT(eosio::notify_plugin_impl::action_notify, (tx_id)(account)(name)(seq_num)(receiver)(block_time)(block_num)(authorization)(action_data))
+FC_REFLECT(eosio::notify_plugin_impl::message, (irreversible)(actions))
\ No newline at end of file
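The `notify-filter-on` matching above accepts exact `receiver:action` pairs and receiver-only wildcards (an empty action, e.g. `b1:`). A simplified stand-alone sketch of that lookup, with `std::string` standing in for `eosio::name`:

```cpp
// Sketch of the receiver:action matching used by notify-filter-on entries;
// the pair/set types here are simplified stand-ins for the plugin's own.
#include <set>
#include <string>
#include <utility>

using filter_entry = std::pair<std::string, std::string>; // receiver, action

bool matches(const std::set<filter_entry>& filter_on,
             const std::string& receiver, const std::string& action) {
    if (filter_on.count({receiver, action})) return true; // e.g. b1:transfer
    if (filter_on.count({receiver, ""}))     return true; // e.g. b1: (all actions)
    return false;
}
```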
diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index c46452b5fb4..2d60e92a006 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -1071,8 +1071,13 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool
         }
      }

+      signature_provider_type signature_provider;
+      if (signature_provider_itr != _signature_providers.end()) {
+         signature_provider = signature_provider_itr->second;
+      }
+
      chain.abort_block();
-      chain.start_block(block_time, blocks_to_confirm);
+      chain.start_block(block_time, blocks_to_confirm, signature_provider);
   } FC_LOG_AND_DROP();

   const auto& pbs = chain.pending_block_state();
diff --git a/programs/cleos/httpc.hpp b/programs/cleos/httpc.hpp
index 54e60866fbc..850b7bab787 100644
--- a/programs/cleos/httpc.hpp
+++ b/programs/cleos/httpc.hpp
@@ -105,6 +105,7 @@ namespace eosio { namespace client { namespace http {
   const string history_func_base = "/v1/history";
   const string get_actions_func = history_func_base + "/get_actions";
   const string get_transaction_func = history_func_base + "/get_transaction";
+  const string get_block_detail_func = history_func_base + "/get_block_detail";
   const string get_key_accounts_func = history_func_base + "/get_key_accounts";
   const string get_controlled_accounts_func = history_func_base + "/get_controlled_accounts";
diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp
index b2a2c326353..7124357f158 100644
--- a/programs/cleos/main.cpp
+++ b/programs/cleos/main.cpp
@@ -552,12 +552,15 @@ chain::action create_delegate(const name& from, const name& receiver, const asse
                        config::system_account_name, N(delegatebw), act_payload);
}

-fc::variant regproducer_variant(const account_name& producer, const public_key_type& key, const string& url, uint16_t location) {
+fc::variant regproducer_variant(const account_name& producer, const public_key_type& key, const string& url, const string& location) {
+   auto _location = atoi(location.c_str());
+   FC_ASSERT(_location > -12 && _location <= 12, "location must be a time zone integer from -11 to 12");
+   _location = _location >= 0 ? _location : 24 + _location;
   return fc::mutable_variant_object()
            ("producer", producer)
            ("producer_key", key)
            ("url", url)
-           ("location", location)
+           ("location", _location)
            ;
}
@@ -861,14 +864,14 @@ struct register_producer_subcommand {
   string producer_str;
   string producer_key_str;
   string url;
-   uint16_t loc = 0;
+   string loc;

   register_producer_subcommand(CLI::App* actionRoot) {
      auto register_producer = actionRoot->add_subcommand("regproducer", localized("Register a new producer"));
      register_producer->add_option("account", producer_str, localized("The account to register as a producer"))->required();
      register_producer->add_option("producer_key", producer_key_str, localized("The producer's public key"))->required();
      register_producer->add_option("url", url, localized("url where info about producer can be found"), true);
-      register_producer->add_option("location", loc, localized("relative location for purpose of nearest neighbor scheduling"), true);
+      register_producer->add_option("location", loc, localized("The producer's time zone, an integer from -11 to 12"))->required();

      add_standard_transaction_options(register_producer);
@@ -2146,6 +2149,15 @@ int main( int argc, char** argv ) {
      std::cout << fc::json::to_pretty_string(call(get_transaction_func, arg)) << std::endl;
   });

+   // get block detail
+   string block_detail_arg;
+   auto getBlockDetail = get->add_subcommand("block_detail", localized("Retrieve a full block from the blockchain"), false);
+   getBlockDetail->add_option("block", block_detail_arg, localized("The number or ID of the block to retrieve"))->required();
+   getBlockDetail->set_callback([&block_detail_arg] {
+      auto arg = fc::mutable_variant_object("block_num_or_id", block_detail_arg);
+      std::cout << fc::json::to_pretty_string(call(get_block_detail_func, arg)) << std::endl;
+   });
+
   // get actions
   string account_name;
   string skip_seq_str;
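A worked example of the time-zone-to-location arithmetic introduced above: zones in [-11, 12] map to an unsigned location in [0, 23], with negative zones wrapping to `24 + zone`:

```cpp
// Stand-alone illustration of regproducer's location mapping; the function
// name is ours, the arithmetic mirrors the change above.
#include <cassert>
#include <cstdlib>

int location_from_zone(const char* zone_str) {
    int zone = std::atoi(zone_str);
    assert(zone > -12 && zone <= 12);    // accepted range: [-11, 12]
    return zone >= 0 ? zone : 24 + zone; // result range:   [0, 23]
}

int main() {
    assert(location_from_zone("8")  == 8);  // UTC+8
    assert(location_from_zone("-5") == 19); // UTC-5 wraps to 24 - 5
    assert(location_from_zone("0")  == 0);  // UTC
    return 0;
}
```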
kafka_plugin +target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} kafka_plugin -Wl,${no_whole_archive_flag} ) +# notify_plugin +target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} notify_plugin -Wl,${no_whole_archive_flag} ) include(additionalPlugins) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 6183b0020d5..b32d2a51315 100644 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -622,6 +622,126 @@ fi printf "\\tWASM found at %s/opt/wasm.\\n" "${HOME}" fi + printf "\\n\\tChecking for librdkafka with support.\\n" + RDKAFKA_DIR=/usr/local/include/librdkafka + if [ ! -d "${RDKAFKA_DIR}" ]; then + # Build librdkafka support: + printf "\\tInstalling librdkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/librdkafka" ]; then + if ! rm -rf "${TEMP_DIR}/librdkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/librdkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b v0.11.6 https://github.com/boscore/librdkafka.git + then + printf "\\tUnable to clone librdkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/" + then + printf "\\tUnable to enter directory %s/librdkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -H. -B_cmake_build + then + printf "\\tError cmake_build librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build + then + printf "\\tError compiling cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build , librdkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/_cmake_build" + then + printf "\\tUnable to enter directory %s/librdkafka/_cmake_build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tlibrdkafka successffully installed @ %s.\\n\\n" "${RDKAFKA_DIR}" + else + printf "\\t librdkafka found at %s.\\n" "${RDKAFKA_DIR}" + fi + + printf "\\n\\tChecking for cppkafka with support.\\n" + CPPKAFKA_DIR=/usr/local/include/cppkafka + if [ ! -d "${CPPKAFKA_DIR}" ]; then + # Build cppkafka support: + printf "\\tInstalling cppkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/cppkafka" ]; then + if ! rm -rf "${TEMP_DIR}/cppkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/cppkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b 0.2 https://github.com/boscore/cppkafka.git + then + printf "\\tUnable to clone cppkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/" + then + printf "\\tUnable to enter directory %s/cppkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! mkdir build + then + printf "\\tUnable to remove directory build.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! 
cd "${TEMP_DIR}/cppkafka/build" + then + printf "\\tUnable to enter directory %s/cppkafka/build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. + then + printf "\\tError compiling cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. , cppkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install cppkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tcppkafka successffully installed @ %s.\\n\\n" "${CPPKAFKA_DIR}" + else + printf "\\t cppkafka found at %s.\\n" "${CPPKAFKA_DIR}" + fi + function print_instructions() { printf "\\n\\t%s -f %s &\\n" "$( command -v mongod )" "${MONGOD_CONF}" diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 652da373244..bedf64f6058 100644 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -686,6 +686,126 @@ mongodconf printf "\\t - WASM found at %s/opt/wasm\\n" "${HOME}" fi + printf "\\n\\tChecking for librdkafka with support.\\n" + RDKAFKA_DIR=/usr/local/include/librdkafka + if [ ! -d "${RDKAFKA_DIR}" ]; then + # Build librdkafka support: + printf "\\tInstalling librdkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/librdkafka" ]; then + if ! rm -rf "${TEMP_DIR}/librdkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/librdkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b v0.11.6 https://github.com/boscore/librdkafka.git + then + printf "\\tUnable to clone librdkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/" + then + printf "\\tUnable to enter directory %s/librdkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -H. -B_cmake_build + then + printf "\\tError cmake_build librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build + then + printf "\\tError compiling cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build , librdkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/_cmake_build" + then + printf "\\tUnable to enter directory %s/librdkafka/_cmake_build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tlibrdkafka successffully installed @ %s.\\n\\n" "${RDKAFKA_DIR}" + else + printf "\\t librdkafka found at %s.\\n" "${RDKAFKA_DIR}" + fi + + printf "\\n\\tChecking for cppkafka with support.\\n" + CPPKAFKA_DIR=/usr/local/include/cppkafka + if [ ! -d "${CPPKAFKA_DIR}" ]; then + # Build cppkafka support: + printf "\\tInstalling cppkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/cppkafka" ]; then + if ! rm -rf "${TEMP_DIR}/cppkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/cppkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! 
git clone --depth 1 -b v0.2 https://github.com/boscore/cppkafka.git + then + printf "\\tUnable to clone cppkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/" + then + printf "\\tUnable to enter directory %s/cppkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! mkdir build + then + printf "\\tUnable to remove directory build.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/build" + then + printf "\\tUnable to enter directory %s/cppkafka/build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. + then + printf "\\tError compiling cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. , cppkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install cppkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tcppkafka successffully installed @ %s.\\n\\n" "${CPPKAFKA_DIR}" + else + printf "\\t cppkafka found at %s.\\n" "${CPPKAFKA_DIR}" + fi + printf "\\n" function print_instructions() diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index a91e1611d44..f3ee81cf467 100644 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -479,6 +479,126 @@ printf "\\tWASM found at /usr/local/wasm/bin/.\\n" fi + printf "\\n\\tChecking for librdkafka with support.\\n" + RDKAFKA_DIR=/usr/local/include/librdkafka + if [ ! -d "${RDKAFKA_DIR}" ]; then + # Build librdkafka support: + printf "\\tInstalling librdkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/librdkafka" ]; then + if ! rm -rf "${TEMP_DIR}/librdkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/librdkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b v0.11.6 https://github.com/boscore/librdkafka.git + then + printf "\\tUnable to clone librdkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/" + then + printf "\\tUnable to enter directory %s/librdkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -H. -B_cmake_build + then + printf "\\tError cmake_build librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build + then + printf "\\tError compiling cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build , librdkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/_cmake_build" + then + printf "\\tUnable to enter directory %s/librdkafka/_cmake_build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tlibrdkafka successffully installed @ %s.\\n\\n" "${RDKAFKA_DIR}" + else + printf "\\t librdkafka found at %s.\\n" "${RDKAFKA_DIR}" + fi + + printf "\\n\\tChecking for cppkafka with support.\\n" + CPPKAFKA_DIR=/usr/local/include/cppkafka + if [ ! -d "${CPPKAFKA_DIR}" ]; then + # Build cppkafka support: + printf "\\tInstalling cppkafka\\n" + if ! 
cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/cppkafka" ]; then + if ! rm -rf "${TEMP_DIR}/cppkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/cppkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b 0.2 https://github.com/boscore/cppkafka.git + then + printf "\\tUnable to clone cppkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/" + then + printf "\\tUnable to enter directory %s/cppkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! mkdir build + then + printf "\\tUnable to remove directory build.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/build" + then + printf "\\tUnable to enter directory %s/cppkafka/build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. + then + printf "\\tError compiling cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. , cppkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install cppkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tcppkafka successffully installed @ %s.\\n\\n" "${CPPKAFKA_DIR}" + else + printf "\\t cppkafka found at %s.\\n" "${CPPKAFKA_DIR}" + fi + function print_instructions() { printf "\\n\\t%s -f %s &\\n" "$( command -v mongod )" "${MONGOD_CONF}" diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 35cad3d7d8e..238781f2f8f 100644 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -492,6 +492,126 @@ printf "\\n\\tWASM found @ %s/opt/wasm\\n\\n" "${HOME}" fi + printf "\\n\\tChecking for librdkafka with support.\\n" + RDKAFKA_DIR=/usr/local/include/librdkafka + if [ ! -d "${RDKAFKA_DIR}" ]; then + # Build librdkafka support: + printf "\\tInstalling librdkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/librdkafka" ]; then + if ! rm -rf "${TEMP_DIR}/librdkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/librdkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b v0.11.6 https://github.com/boscore/librdkafka.git + then + printf "\\tUnable to clone librdkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/" + then + printf "\\tUnable to enter directory %s/librdkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -H. -B_cmake_build + then + printf "\\tError cmake_build librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build + then + printf "\\tError compiling cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build , librdkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/_cmake_build" + then + printf "\\tUnable to enter directory %s/librdkafka/_cmake_build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! 
sudo make install + then + printf "\\tUnable to make install librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tlibrdkafka successffully installed @ %s.\\n\\n" "${RDKAFKA_DIR}" + else + printf "\\t librdkafka found at %s.\\n" "${RDKAFKA_DIR}" + fi + + printf "\\n\\tChecking for cppkafka with support.\\n" + CPPKAFKA_DIR=/usr/local/include/cppkafka + if [ ! -d "${CPPKAFKA_DIR}" ]; then + # Build cppkafka support: + printf "\\tInstalling cppkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/cppkafka" ]; then + if ! rm -rf "${TEMP_DIR}/cppkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/cppkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b 0.2 https://github.com/boscore/cppkafka.git + then + printf "\\tUnable to clone cppkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/" + then + printf "\\tUnable to enter directory %s/cppkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! mkdir build + then + printf "\\tUnable to remove directory build.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/build" + then + printf "\\tUnable to enter directory %s/cppkafka/build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. + then + printf "\\tError compiling cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. , cppkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install cppkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tcppkafka successffully installed @ %s.\\n\\n" "${CPPKAFKA_DIR}" + else + printf "\\t cppkafka found at %s.\\n" "${CPPKAFKA_DIR}" + fi + function print_instructions() { printf "\\n\\t%s -f %s &\\n" "$( command -v mongod )" "${MONGOD_CONF}" diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 4c9873a60a1..ab3e8823d8e 100644 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -503,6 +503,126 @@ mongodconf printf "\\tWASM found at %s/opt/wasm/bin.\\n" "${HOME}" fi + printf "\\n\\tChecking for librdkafka with support.\\n" + RDKAFKA_DIR=/usr/local/include/librdkafka + if [ ! -d "${RDKAFKA_DIR}" ]; then + # Build librdkafka support: + printf "\\tInstalling librdkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/librdkafka" ]; then + if ! rm -rf "${TEMP_DIR}/librdkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/librdkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b v0.11.6 https://github.com/boscore/librdkafka.git + then + printf "\\tUnable to clone librdkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/" + then + printf "\\tUnable to enter directory %s/librdkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -H. 
-B_cmake_build + then + printf "\\tError cmake_build librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build + then + printf "\\tError compiling cmake -DRDKAFKA_BUILD_STATIC=1 --build _cmake_build , librdkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/librdkafka/_cmake_build" + then + printf "\\tUnable to enter directory %s/librdkafka/_cmake_build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install librdkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tlibrdkafka successffully installed @ %s.\\n\\n" "${RDKAFKA_DIR}" + else + printf "\\t librdkafka found at %s.\\n" "${RDKAFKA_DIR}" + fi + + printf "\\n\\tChecking for cppkafka with support.\\n" + CPPKAFKA_DIR=/usr/local/include/cppkafka + if [ ! -d "${CPPKAFKA_DIR}" ]; then + # Build cppkafka support: + printf "\\tInstalling cppkafka\\n" + if ! cd "${TEMP_DIR}" + then + printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if [ -d "${TEMP_DIR}/cppkafka" ]; then + if ! rm -rf "${TEMP_DIR}/cppkafka" + then + printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "${TEMP_DIR}/cppkafka/" "${BASH_SOURCE[0]}" + printf "\\tExiting now.\\n\\n" + exit 1; + fi + fi + if ! git clone --depth 1 -b 0.2 https://github.com/boscore/cppkafka.git + then + printf "\\tUnable to clone cppkafka repo.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/" + then + printf "\\tUnable to enter directory %s/cppkafka/.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! mkdir build + then + printf "\\tUnable to remove directory build.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cd "${TEMP_DIR}/cppkafka/build" + then + printf "\\tUnable to enter directory %s/cppkafka/build.\\n" "${TEMP_DIR}" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. + then + printf "\\tError compiling cmake -DCPPKAFKA_RDKAFKA_STATIC_LIB=1 -DCPPKAFKA_BUILD_SHARED=0 .. , cppkafka.1\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + if ! sudo make install + then + printf "\\tUnable to make install cppkafka.\\n" + printf "\\n\\tExiting now.\\n" + exit 1; + fi + printf "\\n\\tcppkafka successffully installed @ %s.\\n\\n" "${CPPKAFKA_DIR}" + else + printf "\\t cppkafka found at %s.\\n" "${CPPKAFKA_DIR}" + fi + function print_instructions() { printf '\n\texport PATH=${HOME}/opt/mongodb/bin:$PATH\n' diff --git a/unittests/actiondemo/actiondemo.abi b/unittests/actiondemo/actiondemo.abi new file mode 100644 index 00000000000..172d180bfef --- /dev/null +++ b/unittests/actiondemo/actiondemo.abi @@ -0,0 +1,99 @@ +{ + "____comment": "This file was generated by eosio-abigen. 
DO NOT EDIT - 2018-11-17T13:26:02",
+  "version": "eosio::abi/1.0",
+  "types": [],
+  "structs": [{
+      "name": "seedobj",
+      "base": "",
+      "fields": [{
+          "name": "id",
+          "type": "uint64"
+        },{
+          "name": "create",
+          "type": "time_point"
+        },{
+          "name": "seedstr",
+          "type": "string"
+        },{
+          "name": "txid",
+          "type": "string"
+        },{
+          "name": "action",
+          "type": "uint64"
+        }
+      ]
+    },{
+      "name": "args",
+      "base": "",
+      "fields": [{
+          "name": "loop",
+          "type": "uint64"
+        },{
+          "name": "num",
+          "type": "uint64"
+        }
+      ]
+    },{
+      "name": "generate",
+      "base": "",
+      "fields": [{
+          "name": "t",
+          "type": "args"
+        }
+      ]
+    },{
+      "name": "clear",
+      "base": "",
+      "fields": []
+    },{
+      "name": "args_inline",
+      "base": "",
+      "fields": [{
+          "name": "payer",
+          "type": "name"
+        },{
+          "name": "in",
+          "type": "name"
+        }
+      ]
+    },{
+      "name": "inlineact",
+      "base": "",
+      "fields": [{
+          "name": "t",
+          "type": "args_inline"
+        }
+      ]
+    }
+  ],
+  "actions": [{
+      "name": "generate",
+      "type": "generate",
+      "ricardian_contract": ""
+    },{
+      "name": "clear",
+      "type": "clear",
+      "ricardian_contract": ""
+    },{
+      "name": "inlineact",
+      "type": "inlineact",
+      "ricardian_contract": ""
+    }
+  ],
+  "tables": [{
+      "name": "seedobjs",
+      "index_type": "i64",
+      "key_names": [
+        "id"
+      ],
+      "key_types": [
+        "uint64"
+      ],
+      "type": "seedobj"
+    }
+  ],
+  "ricardian_clauses": [],
+  "error_messages": [],
+  "abi_extensions": [],
+  "variants": []
+}
\ No newline at end of file
diff --git a/unittests/actiondemo/actiondemo.cpp b/unittests/actiondemo/actiondemo.cpp
new file mode 100644
index 00000000000..3f8a3fcb6e0
--- /dev/null
+++ b/unittests/actiondemo/actiondemo.cpp
@@ -0,0 +1,106 @@
+#include "actiondemo.hpp"
+#include "../../contracts/eosiolib/print.hpp"
+#include "../../contracts/eosiolib/types.hpp"
+#include "../../contracts/eosiolib/transaction.hpp"
+
+namespace spaceaction {
+
+    void actiondemo::apply( account_name code, account_name act ) {
+
+        if( code != _self )
+            return;
+
+        switch( act ) {
+            case N(generate):
+                generate(unpack_action_data<args>());
+                return;
+            case N(inlineact):
+                inlineact(unpack_action_data<args_inline>());
+                return; // return here: falling through to clear() would wipe the rows just written
+            case N(clear):
+                clear();
+                return;
+        }
+    }
+
+    void actiondemo::clear(){
+        //require_auth(_self);
+        seedobjs table(_self, _self);
+        auto iter = table.begin();
+        while (iter != table.end())
+        {
+            table.erase(iter);
+            iter = table.begin();
+        }
+    }
+
+    std::string to_hex( const char* d, uint32_t s )
+    {
+        std::string r;
+        const char* to_hex="0123456789abcdef";
+        uint8_t* c = (uint8_t*)d;
+        for( uint32_t i = 0; i < s; ++i )
+            (r += to_hex[(c[i]>>4)]) += to_hex[(c[i] &0x0f)];
+        return r;
+    }
+
+    void actiondemo::generate(const args& t){
+        for (int i = 0; i < t.loop; ++i) {
+            transaction_id_type txid;
+            get_transaction_id(&txid);
+            std::string tx = to_hex((char*)&txid.hash, 32);
+
+            uint64_t seq = 0;
+            get_action_sequence(&seq);
+
+            size_t szBuff = sizeof(signature);
+            char buf[szBuff];
+            memset(buf,0,szBuff);
+            size_t size = bpsig_action_time_seed(buf, sizeof(buf));
+            eosio_assert(size > 0 && size <= sizeof(buf), "buffer is too small");
+            std::string seedstr = to_hex(buf,size);
+
+            seedobjs table(_self, _self);
+            uint64_t count = 0;
+            for (auto itr = table.begin(); itr != table.end(); ++itr) {
+                ++count;
+            }
+
+            auto r = table.emplace(_self, [&](auto &a) {
+                a.id = count + 1;
+                a.create = eosio::time_point_sec(now());
+                a.seedstr = seedstr;
+                a.txid = tx;
+                a.action = seq;
+            });
+            print_f("self:%, loop:%, count:%, seedstr:%", name{_self}.to_string(), t.loop, count, r->seedstr);
+        }
+    }
+
+    void actiondemo::inlineact(const 
args_inline& t){ + auto& payer = t.payer; + args gen; + gen.loop = 1; + gen.num = 1; + + generate(gen); + + if(t.in != 0) + { + INLINE_ACTION_SENDER(spaceaction::actiondemo, generate)( t.in, {payer,N(active)}, + { gen}); + INLINE_ACTION_SENDER(spaceaction::actiondemo, generate)( t.in, {payer,N(active)}, + { gen}); + } + + } +} + +extern "C" { +[[noreturn]] void apply(uint64_t receiver, uint64_t code, uint64_t action) { + spaceaction::actiondemo obj(receiver); + obj.apply(code, action); + eosio_exit(0); +} +} \ No newline at end of file diff --git a/unittests/actiondemo/actiondemo.hpp b/unittests/actiondemo/actiondemo.hpp new file mode 100644 index 00000000000..e1d5031bfa5 --- /dev/null +++ b/unittests/actiondemo/actiondemo.hpp @@ -0,0 +1,50 @@ +#pragma once +#include +#include + +namespace spaceaction { + + using namespace eosio; + class actiondemo : public contract { + typedef std::chrono::milliseconds duration; + public: + actiondemo( account_name self ):contract(self){} + + void apply( account_name contract, account_name act ); + + struct args{ + uint64_t loop; + uint64_t num; + }; + //@abi action + void generate(const args& t); + + //@abi action + void clear(); + + + struct args_inline{ + account_name payer; + account_name in; + }; + //@abi action + void inlineact(const args_inline& t); + + public: + // @abi table seedobjs i64 + struct seedobj { + uint64_t id; + time_point create; + std::string seedstr; + std::string txid; + uint64_t action; + + uint64_t primary_key()const { return id; } + EOSLIB_SERIALIZE(seedobj,(id)(create)(seedstr)(txid)(action)) + }; + typedef eosio::multi_index< N(seedobjs), seedobj> seedobjs; + + + }; + +} /// namespace eosio diff --git a/unittests/actiondemo/test.py b/unittests/actiondemo/test.py new file mode 100644 index 00000000000..5ced2b4276c --- /dev/null +++ b/unittests/actiondemo/test.py @@ -0,0 +1,223 @@ +#!/usr/bin/env python3 + +import argparse +import json + +import os + +import subprocess + +import time + +args = None +logFile = None + +unlockTimeout = 999999 + +systemAccounts = [ + 'eosio.bpay', + 'eosio.msig', + 'eosio.names', + 'eosio.ram', + 'eosio.ramfee', + 'eosio.saving', + 'eosio.stake', + 'eosio.token', + 'eosio.vpay', +] + + +def jsonArg(a): + return " '" + json.dumps(a) + "' " + +def run(args): + print('testtool.py:', args) + logFile.write(args + '\n') + if subprocess.call(args, shell=True): + print('testtool.py: exiting because of error') + #sys.exit(1) + +def retry(args): + while True: + print('testtool.py:', args) + logFile.write(args + '\n') + if subprocess.call(args, shell=True): + print('*** Retry') + else: + break + +def background(args): + print('testtool.py:', args) + logFile.write(args + '\n') + return subprocess.Popen(args, shell=True) + +def getOutput(args): + print('testtool.py:', args) + logFile.write(args + '\n') + proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE) + return proc.communicate()[0].decode('utf-8') + +def getJsonOutput(args): + print('testtool.py:', args) + logFile.write(args + '\n') + proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE) + return json.loads(proc.communicate()[0].decode('utf-8')) + +def sleep(t): + print('sleep', t, '...') + time.sleep(t) + print('resume') + +def startWallet(): + run('rm -rf ' + os.path.abspath(args.wallet_dir)) + run('mkdir -p ' + os.path.abspath(args.wallet_dir)) + background(args.keosd + ' --unlock-timeout %d --http-server-address 127.0.0.1:6666 --wallet-dir %s' % (unlockTimeout, os.path.abspath(args.wallet_dir))) + sleep(4) + run(args.cleos + 
'wallet create --file ./unlock.key ' ) + +def importKeys(): + run(args.cleos + 'wallet import --private-key ' + args.private_key) + +# def createStakedAccounts(b, e): +# for i in range(b, e): +# a = accounts[i] +# stake = 100 +# run(args.cleos + 'system newaccount eosio --transfer ' + a['name'] + ' ' + a['pub'] + ' --stake-net "' + stake + '" --stake-cpu "' + stake + '"') + + +def stepStartWallet(): + startWallet() + importKeys() + # run('rm -rf ~/.local/share/eosio/nodeos/data ') + run("rm -rf ./data/*") + background(args.nodeos + ' -e -p eosio --blocks-dir ./data/block/ --genesis-json %s --config-dir ./ --data-dir ./data/ --plugin eosio::http_plugin --plugin eosio::chain_api_plugin --plugin eosio::producer_plugin --plugin eosio::history_api_plugin> eos.log 2>&1 &' % args.genesis) + run("rm -rf ./data2/*") + background(args.nodeos + ' --blocks-dir ./data2/block/ --genesis-json %s --data-dir ./data2/ --config-dir ./ --p2p-peer-address 127.0.0.1:9876 --http-server-address 0.0.0.0:8001 --p2p-listen-endpoint 0.0.0.0:9001 --plugin eosio::http_plugin --plugin eosio::chain_api_plugin --plugin eosio::producer_plugin --plugin eosio::history_api_plugin > eos2.log 2>&1 &' % args.genesis) + sleep(30) + + +def createAccounts(): + for a in systemAccounts: + run(args.cleos + 'create account eosio ' + a + ' ' + args.public_key) + run(args.cleos + 'set contract eosio.token ' + args.contracts_dir + 'eosio.token/') + run(args.cleos + 'set contract eosio.msig ' + args.contracts_dir + 'eosio.msig/') + run(args.cleos + 'push action eosio.token create \'["eosio", "10000000000.0000 %s"]\' -p eosio.token' % (args.symbol)) + run(args.cleos + 'push action eosio.token issue \'["eosio", "%s %s", "memo"]\' -p eosio' % ("1000000.0000", args.symbol)) + retry(args.cleos + 'set contract eosio ' + args.contracts_dir + 'eosio.system/ -p eosio') + sleep(1) + run(args.cleos + 'push action eosio setpriv' + jsonArg(['eosio.msig', 1]) + '-p eosio@active') + + for a in accounts: + run(args.cleos + 'system newaccount --stake-net "10.0000 %s" --stake-cpu "10.0000 %s" --buy-ram-kbytes 80 eosio ' %(args.symbol,args.symbol) + a + ' ' + args.public_key) + + run(args.cleos + 'system newaccount --stake-net "10.0000 %s" --stake-cpu "10.0000 %s" --buy-ram-kbytes 80 eosio '%(args.symbol,args.symbol) + 'cochaintoken' + ' ' + args.public_key) + + run(args.cleos + 'system buyram eosio %s -k 80000 -p eosio ' % args.contract ) + run(args.cleos + 'system delegatebw eosio %s "1000.0000 SYS" "1000.0000 SYS"'% args.contract ) + + run(args.cleos + 'system buyram eosio %s -k 80000 -p eosio ' % args.contract2 ) + run(args.cleos + 'system delegatebw eosio %s "1000.0000 SYS" "1000.0000 SYS"'% args.contract2 ) + +# stepIssueToken() +# +# +# def stepIssueToken(): +# run(args.cleos + 'push action eosio.token issue \'["eosio", "%s %s", "memo"]\' -p eosio' % ("1000000.0000", args.symbol)) +# for i in accounts: +# run(args.cleos + 'push action eosio.token issue \'["%s", "%s %s", "memo"]\' -p eosio' % (i, "1000000.0000", args.symbol)) +# +# sleep(1) + + +def stepKillAll(): + run('killall keosd nodeos || true') + sleep(1.5) +# Command Line Arguments + +def stepInitCaee(): + print ("=========================== set contract caee ===========================" ) + run(args.cleos + 'set contract %s ../actiondemo' %args.contract ) + run(args.cleos + 'set contract %s ../actiondemo' %args.contract2 ) + run(args.cleos + 'set account permission %s active \'{"threshold": 1,"keys": [{"key": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV","weight": 1}],"accounts": 
[{"permission":{"actor":"%s","permission":"eosio.code"},"weight":1}]}\' ' % (args.contract,args.contract)) + print ("sleep 5") + + +def stepClear(): + print ("=========================== set contract clear ===========================" ) + run(args.cleos + 'push action %s clear "[]" -p %s ' %(args.contract, args.contract)) + run(args.cleos + 'get table %s %s seedobjs' %(args.contract, args.contract) ) + run(args.cleos + 'push action %s clear "[]" -p %s ' %(args.contract2, args.contract2)) + run(args.cleos + 'get table %s %s seedobjs' %(args.contract2, args.contract2) ) + print ("sleep 5") + + +def stepGenerate(): + print ("=========================== set contract stepGenerate ===========================" ) + # run(args.cleos + 'push action %s generate \'[{"loop":1, "num":1}]\' -p %s ' %(args.contract, args.contract)) + run(args.cleos + 'push action %s inlineact \'[{"payer":"%s", "in":"%s"}]\' -p %s ' %(args.contract,args.contract,args.contract2, args.contract)) + run(args.cleos + 'get table %s %s seedobjs' %(args.contract, args.contract) ) + run(args.cleos + 'get table %s %s seedobjs' %(args.contract2, args.contract2) ) + print ("sleep 5") + + +parser = argparse.ArgumentParser() + +commands = [ + ('k', 'kill', stepKillAll, True, ""), + ('w', 'wallet', stepStartWallet, True, "Start keosd, create wallet"), + ('s', 'sys', createAccounts, True, "Create all accounts"), + ('i', 'init', stepInitCaee, True, "stepInitCaee"), + ('c', 'clear', stepClear, True, "stepInitCaee"), + ('g', 'generate', stepGenerate, True, "stepInitCaee"), +] + +parser.add_argument('--public-key', metavar='', help="EOSIO Public Key", default='EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV', dest="public_key") +parser.add_argument('--private-Key', metavar='', help="EOSIO Private Key", default='5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3', dest="private_key") +parser.add_argument('--cleos', metavar='', help="Cleos command", default='../../build/programs/cleos/cleos --wallet-url http://127.0.0.1:6666 ') +parser.add_argument('--nodeos', metavar='', help="Path to nodeos binary", default='../../build/programs/nodeos/nodeos ') +parser.add_argument('--keosd', metavar='', help="Path to keosd binary", default='../../build/programs/keosd/keosd ') +parser.add_argument('--contracts-dir', metavar='', help="Path to contracts directory", default='../../build/contracts/') +parser.add_argument('--nodes-dir', metavar='', help="Path to nodes diretodctory", default='./') +parser.add_argument('--genesis', metavar='', help="Path to genesis.json", default="./genesis.json") +parser.add_argument('--wallet-dir', metavar='', help="Path to wallet directory", default='./wallet/') +parser.add_argument('--log-path', metavar='', help="Path to log file", default='./output.log') +# parser.add_argument('--symbol', metavar='', help="The eosio.system symbol", default='SYS') +parser.add_argument('-a', '--all', action='store_true', help="Do everything marked with (*)") +#parser.add_argument('-H', '--http-port', type=int, default=8888, metavar='', help='HTTP port for cleos') + +for (flag, command, function, inAll, help) in commands: + prefix = '' + if inAll: prefix += '*' + if prefix: help = '(' + prefix + ') ' + help + if flag: + parser.add_argument('-' + flag, '--' + command, action='store_true', help=help, dest=command) + else: + parser.add_argument('--' + command, action='store_true', help=help, dest=command) + +args = parser.parse_args() + +args.cleos += '--url http://127.0.0.1:8888 ' +args.symbol = 'SYS' +args.contract = 'caee' +args.contract2 
= 'caee2' + + +accnum = 26 +accounts = [] +# for i in range(97,97+accnum): +# accounts.append("user%c"% chr(i)) +# accounts.append("payman") +accounts.append(args.contract) +accounts.append(args.contract2) + +logFile = open(args.log_path, 'a') +logFile.write('\n\n' + '*' * 80 + '\n\n\n') + +haveCommand = False +for (flag, command, function, inAll, help) in commands: + if getattr(args, command) or inAll and args.all: + if function: + haveCommand = True + function() +if not haveCommand: + print('testtool.py: Tell me what to do. -a does almost everything. -h shows options.') \ No newline at end of file diff --git a/unittests/database_gmr_blklst_tests.cpp b/unittests/database_gmr_blklst_tests.cpp new file mode 100644 index 00000000000..f448ba5a172 --- /dev/null +++ b/unittests/database_gmr_blklst_tests.cpp @@ -0,0 +1,309 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef NON_VALIDATING_TEST +#define TESTER tester +#else +#define TESTER validating_tester +#endif + +using namespace eosio::chain; +using namespace eosio::testing; +namespace bfs = boost::filesystem; + +BOOST_AUTO_TEST_SUITE(database_gmr_blklst_tests) + +vector parse_list_string(string items) +{ + vector item_list; + vector itemlist; + boost::split(itemlist, items, boost::is_any_of(",")); + for (string item : itemlist) + { + item_list.push_back(string_to_name(item.c_str())); + } + + return item_list; +} + +// Simple tests of undo infrastructure +BOOST_AUTO_TEST_CASE(list_config_parse_test) +{ + try + { + TESTER test; + + string str = "alice,bob,tom"; + vector list = parse_list_string(str); + BOOST_TEST(list.size() > 0); + account_name n = N(a); + if (list.size() > 0) + { + n = *(list.begin()); + } + + BOOST_TEST(n != N(a)); + BOOST_TEST(n == N(alice)); + } + FC_LOG_AND_RETHROW() +} + +// Simple tests of undo infrastructure +BOOST_AUTO_TEST_CASE(set_name_list_test) +{ + try + { + TESTER test; + // Bypass read-only restriction on state DB access for this unit test which really needs to mutate the DB to properly conduct its test. 
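+        // NOTE: the angle-bracket template arguments in this file were stripped
+        // during extraction (e.g. "const_cast(...)" above was plausibly
+        // "const_cast<chainbase::database&>(...)", and "db.get()" plausibly
+        // "db.get<global_property2_object>()"). For reference, a plausible
+        // reconstruction of the parse_list_string helper defined at the top of
+        // this file, with element types inferred from its use (assumed, not
+        // recovered from the source):
+        //
+        //   vector<account_name> parse_list_string(string items)
+        //   {
+        //      vector<account_name> item_list;
+        //      vector<string> itemlist;
+        //      boost::split(itemlist, items, boost::is_any_of(","));
+        //      for (string item : itemlist)
+        //         item_list.push_back(string_to_name(item.c_str()));
+        //      return item_list;
+        //   }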
+ chainbase::database &db = const_cast(test.control->db()); + + auto ses = db.start_undo_session(true); + + string str = "alice,bob,tom"; + vector list = parse_list_string(str); + + flat_set nameset(list.begin(), list.end()); + + test.control->set_actor_blacklist(nameset); + + // Make sure we can retrieve that account by name + const global_property2_object &ptr = db.get(); + + // Create an account + db.modify(ptr, [&](global_property2_object &a) { + a.cfg.actor_blacklist = {N(a)}; + a.cfg.contract_blacklist = {N(a)}; + a.cfg.resource_greylist = {N(a)}; + }); + + int64_t lt = static_cast(list_type::actor_blacklist_type); + int64_t lat = static_cast(list_action_type::insert_type); + test.control->set_name_list(lt, lat, list); + + + + + const flat_set& ab = test.control->get_actor_blacklist(); + const flat_set& cb = test.control->get_contract_blacklist(); + const flat_set& rg = test.control->get_resource_greylist(); + + + + + auto convert_names = [&](const shared_vector& namevec, flat_set& nameset) -> void { + for(const auto& a :namevec) + { + nameset.insert(uint64_t(a)); + } + }; + + flat_set aab; + flat_set acb; + flat_set arg; + + const global_property2_object &ptr1 = db.get(); + chain_config2 c = ptr1.cfg; + + BOOST_TEST(c.actor_blacklist.size() == 4); + BOOST_TEST(ab.size() == 4); + + convert_names(c.actor_blacklist, aab); + convert_names(c.contract_blacklist, acb); + convert_names(c.resource_greylist, arg); + + + if (c.actor_blacklist.size() == 4) + { + + bool b = (aab.find(N(a)) != aab.end()); + BOOST_TEST(b); + } + + bool d = ab.find(N(a)) != ab.end(); + BOOST_TEST(d); + bool m = aab.find(N(alice)) != aab.end(); + BOOST_TEST(m); + + // Undo creation of the account + ses.undo(); + + + } + FC_LOG_AND_RETHROW() +} + +// Simple tests of undo infrastructure +BOOST_AUTO_TEST_CASE(actor_blacklist_config_test) +{ + try + { + TESTER test; + // Bypass read-only restriction on state DB access for this unit test which really needs to mutate the DB to properly conduct its test. + chainbase::database &db = const_cast(test.control->db()); + + auto ses = db.start_undo_session(true); + + // string str= "alice,bob,tom"; + // vector list = parse_list_string(str); + + // Make sure we can retrieve that account by name + const global_property2_object &ptr = db.get(); + + // Create an account + db.modify(ptr, [&](global_property2_object &a) { + a.cfg.actor_blacklist = {N(a)}; + }); + + chain_config2 a = ptr.cfg; + + account_name v; + if (a.actor_blacklist.size() > 0) + { + v = *(a.actor_blacklist.begin()); + } + + std::size_t s = a.actor_blacklist.size(); + + BOOST_TEST(1 == s); + + BOOST_TEST(v == N(a)); + + // Undo creation of the account + ses.undo(); + + + } + FC_LOG_AND_RETHROW() +} + +// Simple tests of undo infrastructure +BOOST_AUTO_TEST_CASE(contract_blacklist_config_test) +{ + try + { + TESTER test; + // Bypass read-only restriction on state DB access for this unit test which really needs to mutate the DB to properly conduct its test. 
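+        // Each case in this suite repeats the same chainbase pattern; in outline
+        // (a restatement of the surrounding code, not additional test logic):
+        //
+        //   auto ses = db.start_undo_session(true);        // snapshot the state DB
+        //   db.modify(ptr, [&](auto& a) { /* mutate */ }); // write the object directly
+        //   BOOST_TEST(/* assert against the mutated state */);
+        //   ses.undo();                                    // roll back so later cases start clean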
+ chainbase::database &db = const_cast(test.control->db()); + + auto ses = db.start_undo_session(true); + + // string str= "alice,bob,tom"; + // vector list = parse_list_string(str); + + // Make sure we can retrieve that account by name + const global_property2_object &ptr = db.get(); + + // Create an account + db.modify(ptr, [&](global_property2_object &a) { + a.cfg.contract_blacklist = {N(a)}; + }); + + chain_config2 a = ptr.cfg; + + account_name v ; + if (a.contract_blacklist.size() > 0) + { + v = *(a.contract_blacklist.begin()); + } + + std::size_t s = a.contract_blacklist.size(); + + BOOST_TEST(1 == s); + + BOOST_TEST(v == N(a)); + + // Undo creation of the account + ses.undo(); + + + } + FC_LOG_AND_RETHROW() +} + +// Simple tests of undo infrastructure +BOOST_AUTO_TEST_CASE(resource_greylist_config_test) +{ + try + { + TESTER test; + // Bypass read-only restriction on state DB access for this unit test which really needs to mutate the DB to properly conduct its test. + chainbase::database &db = const_cast(test.control->db()); + + auto ses = db.start_undo_session(true); + + // string str= "alice,bob,tom"; + // vector list = parse_list_string(str); + + // Make sure we can retrieve that account by name + const global_property2_object &ptr = db.get(); + + // Create an account + db.modify(ptr, [&](global_property2_object &a) { + a.cfg.resource_greylist = {N(a)}; + }); + + chain_config2 a = ptr.cfg; + + account_name v ; + if (a.resource_greylist.size() > 0) + { + v = *(a.resource_greylist.begin()); + } + + std::size_t s = a.resource_greylist.size(); + + BOOST_TEST(1 == s); + + BOOST_TEST(v == N(a)); + + // Undo creation of the account + ses.undo(); + + } + FC_LOG_AND_RETHROW() +} + +// Simple tests of undo infrastructure +BOOST_AUTO_TEST_CASE(gmrource_limit_config_test) +{ + try + { + TESTER test; + // Bypass read-only restriction on state DB access for this unit test which really needs to mutate the DB to properly conduct its test. + chainbase::database &db = const_cast(test.control->db()); + + auto ses = db.start_undo_session(true); + + // Make sure we can retrieve that account by name + const global_property2_object &ptr = db.get(); + + // Create an account + db.modify(ptr, [&](global_property2_object &a) { + a.gmr.cpu_us = 100; + a.gmr.net_byte = 1024; + a.gmr.ram_byte = 1; + }); + + BOOST_TEST(ptr.gmr.cpu_us == 100); + BOOST_TEST(ptr.gmr.net_byte == 1024); + BOOST_TEST(ptr.gmr.ram_byte == 1); + + // Undo creation of the account + ses.undo(); + + + } + FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/database_tests.cpp b/unittests/database_tests.cpp index ac97f6c21a6..8f9d3553928 100644 --- a/unittests/database_tests.cpp +++ b/unittests/database_tests.cpp @@ -28,7 +28,8 @@ BOOST_AUTO_TEST_SUITE(database_tests) TESTER test; // Bypass read-only restriction on state DB access for this unit test which really needs to mutate the DB to properly conduct its test. 
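+       // The BOSCore-specific objects exercised by the suite above are not part
+       // of this patch; from their usage one can infer roughly the following
+       // shape (an assumption based on the tests, not the actual headers):
+       //
+       //   struct chain_config2 {                 // global_property2_object::cfg
+       //      shared_vector<account_name> actor_blacklist;
+       //      shared_vector<account_name> contract_blacklist;
+       //      shared_vector<account_name> resource_greylist;
+       //   };
+       //   struct guaranteed_minimum_resources {  // global_property2_object::gmr
+       //      uint64_t ram_byte;
+       //      uint64_t cpu_us;
+       //      uint64_t net_byte;
+       //   };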
- eosio::chain::database& db = const_cast( test.control->db() ); + // eosio::chain::database& db = const_cast( test.control->db() ); + chainbase::database &db = const_cast(test.control->db()); auto ses = db.start_undo_session(true); diff --git a/unittests/gmr_test.cpp b/unittests/gmr_test.cpp new file mode 100644 index 00000000000..3874552d0e5 --- /dev/null +++ b/unittests/gmr_test.cpp @@ -0,0 +1,234 @@ +#include +#include +#include +#include +#include +#include + +#include +#ifdef NON_VALIDATING_TEST +#define TESTER tester +#else +#define TESTER validating_tester +#endif + +using namespace eosio::chain::resource_limits; +using namespace eosio::testing; +using namespace eosio::chain; + +class gmr_fixture : private chainbase_fixture<512 * 1024>, public resource_limits_manager +{ + public: + gmr_fixture() + : chainbase_fixture(), resource_limits_manager(*chainbase_fixture::_db) + { + add_indices(); + initialize_database(); + } + + ~gmr_fixture() {} + + chainbase::database::session start_session() + { + return chainbase_fixture::_db->start_undo_session(true); + } +}; + +BOOST_AUTO_TEST_SUITE(gmr_test) + +BOOST_FIXTURE_TEST_CASE(check_block_limits_cpu, gmr_fixture) +try +{ + const account_name account(1); + const uint64_t increment = 10000; + initialize_account(account); + set_account_limits(account, 1000, 0, 0); + initialize_account(N(dan)); + initialize_account(N(everyone)); + set_account_limits(N(dan), 0, 0, 10000); + set_account_limits(N(everyone), 0, 0, 10000000000000ll); + + + process_account_limit_updates(); + + // uint16_t gmrource_limit_per_day = 100; + + // Bypass read-only restriction on state DB access for this unit test which really needs to mutate the DB to properly conduct its test. + + // test.control->startup(); + + // // Make sure we can no longer find + + const uint64_t expected_iterations = config::default_gmr_cpu_limit / increment; + + for (int idx = 0; idx < expected_iterations-1; idx++) + { + add_transaction_usage({account}, increment, 0, 0); + process_block_usage(idx); + } + + auto arl = get_account_cpu_limit_ex(account, true); + + BOOST_TEST(arl.available >= 9997); + BOOST_REQUIRE_THROW(add_transaction_usage({account}, increment, 0, 0), block_resource_exhausted); +} +FC_LOG_AND_RETHROW(); + +BOOST_FIXTURE_TEST_CASE(check_block_limits_cpu_lowerthan, gmr_fixture) +try +{ + const account_name account(1); + const uint64_t increment = 10000; + initialize_account(account); + set_account_limits(account, increment, 0, 0); + initialize_account(N(dan)); + initialize_account(N(everyone)); + set_account_limits(N(dan), 0, 0, 10000); + set_account_limits(N(everyone), 0, 0, 10000000000000ll); + process_account_limit_updates(); + + const uint64_t expected_iterations = config::default_gmr_cpu_limit / increment; + + for (int idx = 0; idx < expected_iterations-1; idx++) + { + add_transaction_usage({account}, increment, 0, 0); + process_block_usage(idx); + } + + auto arl = get_account_cpu_limit_ex(account, true); + BOOST_TEST(arl.available >= 9997); + + // BOOST_REQUIRE_THROW(add_transaction_usage({account}, increment, 0, 0), block_resource_exhausted); +} +FC_LOG_AND_RETHROW(); + +BOOST_FIXTURE_TEST_CASE(check_block_limits_net_lowerthan, gmr_fixture) +try +{ + const account_name account(1); + const uint64_t increment = 1000; + initialize_account(account); + set_account_limits(account, increment, 0, 0); + initialize_account(N(dan)); + initialize_account(N(everyone)); + set_account_limits(N(dan), 0, 10000, 0); + set_account_limits(N(everyone), 0, 10000000000000ll, 0); + 
process_account_limit_updates(); + + const uint64_t expected_iterations = config::default_gmr_net_limit / increment; + + for (int idx = 0; idx < expected_iterations-1; idx++) + { + add_transaction_usage({account}, 0, increment, 0); + process_block_usage(idx); + } + + auto arl = get_account_net_limit_ex(account, true); + BOOST_TEST(arl.available >= 1238); + + // BOOST_REQUIRE_THROW(add_transaction_usage({account}, 0,increment, 0), block_resource_exhausted); +} +FC_LOG_AND_RETHROW(); + +BOOST_FIXTURE_TEST_CASE(check_block_limits_ram, gmr_fixture) +try +{ + set_gmr_parameters( + { 1024, 200000,10240} + ); + + const account_name account(1); + const uint64_t increment = 1000; + initialize_account(account); + set_account_limits(account, increment, 10, 10); + initialize_account(N(dan)); + initialize_account(N(everyone)); + set_account_limits(N(dan), 0, 10000, 0); + set_account_limits(N(everyone), 0, 10000000000000ll, 0); + process_account_limit_updates(); + + const uint64_t expected_iterations = config::default_gmr_net_limit / increment; + + //for ( + int idx = 0;// idx < expected_iterations-1; idx++) + { + add_transaction_usage({account}, 0, increment, 0); + process_block_usage(idx); + } + + auto arl = get_account_net_limit_ex(account, true); + BOOST_TEST(arl.available >= 0); + + int64_t ram_bytes; + int64_t net_weight; + int64_t cpu_weight; + bool raw = false; + get_account_limits(account, ram_bytes, net_weight, cpu_weight, raw); + + BOOST_TEST(1024*2 == ram_bytes); + BOOST_TEST(10 == net_weight); + BOOST_TEST(10 == cpu_weight); + + + raw = true; + get_account_limits(account, ram_bytes, net_weight, cpu_weight, raw); + + BOOST_TEST(1024 == ram_bytes); + BOOST_TEST(10 == net_weight); + BOOST_TEST(10 == cpu_weight); + + // BOOST_REQUIRE_THROW(add_transaction_usage({account}, 0,increment, 0), block_resource_exhausted); +} +FC_LOG_AND_RETHROW(); + + + +BOOST_FIXTURE_TEST_CASE(get_account_limits_res, gmr_fixture) +try +{ + const account_name account(1); + const uint64_t increment = 1000; + initialize_account(account); + set_account_limits(account, increment+24, 0, 0); + initialize_account(N(dan)); + initialize_account(N(everyone)); + set_account_limits(N(dan), 0, 10000, 0); + set_account_limits(N(everyone), 0, 10000000000000ll, 0); + process_account_limit_updates(); + + const uint64_t expected_iterations = config::default_gmr_net_limit / increment; + + for (int idx = 0; idx < expected_iterations-1; idx++) + { + add_transaction_usage({account}, 0, increment, 0); + process_block_usage(idx); + } + + auto arl = get_account_net_limit_ex(account, true); + BOOST_TEST(arl.available > 0); + + int64_t ram_bytes; + int64_t net_weight; + int64_t cpu_weight; + bool raw = false; + get_account_limits(account, ram_bytes, net_weight, cpu_weight, raw); + + BOOST_TEST(1024 == ram_bytes); + BOOST_TEST(0 == net_weight); + BOOST_TEST(0 == cpu_weight); + + + raw = true; + get_account_limits(account, ram_bytes, net_weight, cpu_weight, raw); + + BOOST_TEST(1024 == ram_bytes); + BOOST_TEST(0 == net_weight); + BOOST_TEST(0 == cpu_weight); + + + // BOOST_REQUIRE_THROW(add_transaction_usage({account}, 0,increment, 0), block_resource_exhausted); +} +FC_LOG_AND_RETHROW(); + + +BOOST_AUTO_TEST_SUITE_END() From daa83fd069e1a8dac647b29f12f8b35d3f0314ec Mon Sep 17 00:00:00 2001 From: thaipanda <45444502+Thaipanda@users.noreply.github.com> Date: Mon, 17 Dec 2018 11:24:28 +0800 Subject: [PATCH 08/21] merge 1.0.3 to develop (#16) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * boscore basic 
improvement (#2) * kafka_plugin code * Automatic installation librdkafka/cppkafka * Feature/ci * Feature/48 kafka plugin * add CMakeModules/FindCppkafka.cmake * Production of block in time zone sequence * P2p self discovery * P2p self discovery * add notify_plugin * add api "get_block_detail" * add free res limit and blklst code * update free res limit and blklst code * update res code * update unittest code * revert submodule version * code typo * update blklist code * update sync name list db object error code * update code * update index code * Feature/5 ramdom * Revert "Merge branch 'feature/5-ramdom' into 'develop'" This reverts merge request !8 * adjust for setup BOSCore * change description * adjust the kafka plugin dependency be more special * use boscore repository to improve security * change version tag * finish for docker/builder * pass to build docker and update readme * add actionseed, global action sequence (#5) * delete renamed old file * BOSCore v1.0.1-1.4.3 * restructure the version schema * fix __gmpn_set_str error when build bos.contract * prepare for the v1.0.1 * add README files * update info * prepare for v1.0.2 * merge v1.0.2 (#12) * boscore basic improvement (#2) * kafka_plugin code * Automatic installation librdkafka/cppkafka * Feature/ci * Feature/48 kafka plugin * add CMakeModules/FindCppkafka.cmake * Production of block in time zone sequence * P2p self discovery * P2p self discovery * add notify_plugin * add api "get_block_detail" * add free res limit and blklst code * update free res limit and blklst code * update res code * update unittest code * revert submodule version * code typo * update blklist code * update sync name list db object error code * update code * update index code * Feature/5 ramdom * Revert "Merge branch 'feature/5-ramdom' into 'develop'" This reverts merge request !8 * adjust for setup BOSCore * change description * adjust the kafka plugin dependency be more special * use boscore repository to improve security * change version tag * finish for docker/builder * pass to build docker and update readme * add actionseed, global action sequence (#5) * delete renamed old file * BOSCore v1.0.1-1.4.3 * restructure the version schema * fix __gmpn_set_str error when build bos.contract * prepare for the v1.0.1 * finish BOS basic functions * add README files * update info * Release/1.0.x (#11) * boscore basic improvement (#2) * kafka_plugin code * Automatic installation librdkafka/cppkafka * Feature/ci * Feature/48 kafka plugin * add CMakeModules/FindCppkafka.cmake * Production of block in time zone sequence * P2p self discovery * P2p self discovery * add notify_plugin * add api "get_block_detail" * add free res limit and blklst code * update free res limit and blklst code * update res code * update unittest code * revert submodule version * code typo * update blklist code * update sync name list db object error code * update code * update index code * Feature/5 ramdom * Revert "Merge branch 'feature/5-ramdom' into 'develop'" This reverts merge request !8 * adjust for setup BOSCore * change description * adjust the kafka plugin dependency be more special * use boscore repository to improve security * change version tag * finish for docker/builder * pass to build docker and update readme * add actionseed, global action sequence (#5) * delete renamed old file * BOSCore v1.0.1-1.4.3 * restructure the version schema * fix __gmpn_set_str error when build bos.contract * prepare for the v1.0.1 * add README files * update info * readme for kafka & add time for 
action (#5)

* Blacklist stopped working after a node restart, fixes #7 (#8)

* restart sync list db

* recovery system account bos to eosio

* recovery system account bos to eosio

* recovery system account bos to eosio

* Fix/#3 notify plugin (#10)

* Add debug info

* comment log

* rm log for notify_plugin

* prepare for v1.0.2

* patch the EOSIO 1.5.1 security bug fixes

* prepare for v1.0.3

* adjust the slogan
---
 CMakeLists.txt                                    |   2 +-
 Docker/README.md                                  |   4 +-
 README.md                                         |   4 +-
 README_CN.md                                      |   2 +-
 libraries/chain/apply_context.cpp                 | 108 ++++++++++++++----
 libraries/chain/authorization_manager.cpp         |  11 +-
 .../eosio/chain/authorization_manager.hpp         |   3 +-
 7 files changed, 100 insertions(+), 34 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1451955870c..0309bb0b5e4 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -35,7 +35,7 @@ set( CXX_STANDARD_REQUIRED ON)
 
 set(VERSION_MAJOR 1)
 set(VERSION_MINOR 0)
-set(VERSION_PATCH 2)
+set(VERSION_PATCH 3)
 
 set( CLI_CLIENT_EXECUTABLE_NAME cleos )
 set( NODE_EXECUTABLE_NAME nodeos )
diff --git a/Docker/README.md b/Docker/README.md
index 9ff75404b7d..ffdfe07e2c5 100644
--- a/Docker/README.md
+++ b/Docker/README.md
@@ -20,10 +20,10 @@ cd bos/Docker
 docker build . -t boscore/bos
 ```
 
-The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.0.2 tag, you could do the following:
+The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.0.3 tag, you could do the following:
 
 ```bash
-docker build -t boscore/bos:v1.0.2 --build-arg branch=v1.0.2 .
+docker build -t boscore/bos:v1.0.3 --build-arg branch=v1.0.3 .
 ```
 
 By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image.
diff --git a/README.md b/README.md
index 43fcdfa2d61..eecfaf7ec1c 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
-# BOSCore - Born for DApp, be more useable.
+# BOSCore - Born for DApps. Born for Usability.
-## BOSCore Version: v1.0.2 +## BOSCore Version: v1.0.3 ### Basic EOSIO Version: v1.4.4 # Background diff --git a/README_CN.md b/README_CN.md index d4701484fb9..a1f55247bcf 100644 --- a/README_CN.md +++ b/README_CN.md @@ -1,6 +1,6 @@ # BOSCore - 更可用的链,为DApp而生。 -## BOSCore Version: v1.0.2 +## BOSCore Version: v1.0.3 ### Basic EOSIO Version: v1.4.4 # 背景 diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 07ab384eeff..4e9807ed85c 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -209,6 +209,15 @@ void apply_context::execute_inline( action&& a ) { EOS_ASSERT( code != nullptr, action_validate_exception, "inline action's code account ${account} does not exist", ("account", a.account) ); + bool disallow_send_to_self_bypass = false; // eventually set to whether the appropriate protocol feature has been activated + bool send_to_self = (a.account == receiver); + bool inherit_parent_authorizations = (!disallow_send_to_self_bypass && send_to_self && (receiver == act.account) && control.is_producing_block()); + + flat_set inherited_authorizations; + if( inherit_parent_authorizations ) { + inherited_authorizations.reserve( a.authorization.size() ); + } + for( const auto& auth : a.authorization ) { auto* actor = control.db().find(auth.actor); EOS_ASSERT( actor != nullptr, action_validate_exception, @@ -216,22 +225,45 @@ void apply_context::execute_inline( action&& a ) { EOS_ASSERT( control.get_authorization_manager().find_permission(auth) != nullptr, action_validate_exception, "inline action's authorizations include a non-existent permission: ${permission}", ("permission", auth) ); + + if( inherit_parent_authorizations && std::find(act.authorization.begin(), act.authorization.end(), auth) != act.authorization.end() ) { + inherited_authorizations.insert( auth ); + } } - // No need to check authorization if: replaying irreversible blocks; contract is privileged; or, contract is calling itself. - if( !control.skip_auth_check() && !privileged && a.account != receiver ) { - control.get_authorization_manager() - .check_authorization( {a}, - {}, - {{receiver, config::eosio_code_name}}, - control.pending_block_time() - trx_context.published, - std::bind(&transaction_context::checktime, &this->trx_context), - false - ); - - //QUESTION: Is it smart to allow a deferred transaction that has been delayed for some time to get away - // with sending an inline action that requires a delay even though the decision to send that inline - // action was made at the moment the deferred transaction was executed with potentially no forewarning? + // No need to check authorization if replaying irreversible blocks or contract is privileged + if( !control.skip_auth_check() && !privileged ) { + try { + control.get_authorization_manager() + .check_authorization( {a}, + {}, + {{receiver, config::eosio_code_name}}, + control.pending_block_time() - trx_context.published, + std::bind(&transaction_context::checktime, &this->trx_context), + false, + inherited_authorizations + ); + + //QUESTION: Is it smart to allow a deferred transaction that has been delayed for some time to get away + // with sending an inline action that requires a delay even though the decision to send that inline + // action was made at the moment the deferred transaction was executed with potentially no forewarning? 
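+         // To restate the fallback below: an authorization failure is rethrown,
+         // and therefore remains an objective failure, unless the inline action
+         // is being sent to self and the bypass is still allowed. In that case,
+         // while producing, the failure is converted into a
+         // subjective_block_production_exception so the producer simply drops
+         // the transaction, without changing consensus behaviour ahead of the
+         // disallow_send_to_self_bypass protocol feature.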
+ } catch( const fc::exception& e ) { + if( disallow_send_to_self_bypass || !send_to_self ) { + throw; + } else if( control.is_producing_block() ) { + subjective_block_production_exception new_exception(FC_LOG_MESSAGE( error, "Authorization failure with inline action sent to self")); + for (const auto& log: e.get_log()) { + new_exception.append_log(log); + } + throw new_exception; + } + } catch( ... ) { + if( disallow_send_to_self_bypass || !send_to_self ) { + throw; + } else if( control.is_producing_block() ) { + EOS_THROW(subjective_block_production_exception, "Unexpected exception occurred validating inline action sent to self"); + } + } } _inline_actions.emplace_back( move(a) ); @@ -268,16 +300,30 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a require_authorization(payer); /// uses payer's storage } - // if a contract is deferring only actions to itself then there is no need - // to check permissions, it could have done everything anyway. - bool check_auth = false; - for( const auto& act : trx.actions ) { - if( act.account != receiver ) { - check_auth = true; - break; + // Originally this code bypassed authorization checks if a contract was deferring only actions to itself. + // The idea was that the code could already do whatever the deferred transaction could do, so there was no point in checking authorizations. + // But this is not true. The original implementation didn't validate the authorizations on the actions which allowed for privilege escalation. + // It would make it possible to bill RAM to some unrelated account. + // Furthermore, even if the authorizations were forced to be a subset of the current action's authorizations, it would still violate the expectations + // of the signers of the original transaction, because the deferred transaction would allow billing more CPU and network bandwidth than the maximum limit + // specified on the original transaction. + // So, the deferred transaction must always go through the authorization checking if it is not sent by a privileged contract. + // However, the old logic must still be considered because it cannot objectively change until a consensus protocol upgrade. + + bool disallow_send_to_self_bypass = false; // eventually set to whether the appropriate protocol feature has been activated + + auto is_sending_only_to_self = [&trx]( const account_name& self ) { + bool send_to_self = true; + for( const auto& act : trx.actions ) { + if( act.account != self ) { + send_to_self = false; + break; + } } - } - if( check_auth ) { + return send_to_self; + }; + + try { control.get_authorization_manager() .check_authorization( trx.actions, {}, @@ -286,6 +332,22 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a std::bind(&transaction_context::checktime, &this->trx_context), false ); + } catch( const fc::exception& e ) { + if( disallow_send_to_self_bypass || !is_sending_only_to_self(receiver) ) { + throw; + } else if( control.is_producing_block() ) { + subjective_block_production_exception new_exception(FC_LOG_MESSAGE( error, "Authorization failure with sent deferred transaction consisting only of actions to self")); + for (const auto& log: e.get_log()) { + new_exception.append_log(log); + } + throw new_exception; + } + } catch( ... 
) { + if( disallow_send_to_self_bypass || !is_sending_only_to_self(receiver) ) { + throw; + } else if( control.is_producing_block() ) { + EOS_THROW(subjective_block_production_exception, "Unexpected exception occurred validating sent deferred transaction consisting only of actions to self"); + } } } diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index 832f69c71cd..6725468cf97 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -431,7 +431,8 @@ namespace eosio { namespace chain { const flat_set& provided_permissions, fc::microseconds provided_delay, const std::function& _checktime, - bool allow_unused_keys + bool allow_unused_keys, + const flat_set& satisfied_authorizations )const { const auto& checktime = ( static_cast(_checktime) ? _checktime : _noop_checktime ); @@ -488,9 +489,11 @@ namespace eosio { namespace chain { } } - auto res = permissions_to_satisfy.emplace( declared_auth, delay ); - if( !res.second && res.first->second > delay) { // if the declared_auth was already in the map and with a higher delay - res.first->second = delay; + if( satisfied_authorizations.find( declared_auth ) == satisfied_authorizations.end() ) { + auto res = permissions_to_satisfy.emplace( declared_auth, delay ); + if( !res.second && res.first->second > delay) { // if the declared_auth was already in the map and with a higher delay + res.first->second = delay; + } } } } diff --git a/libraries/chain/include/eosio/chain/authorization_manager.hpp b/libraries/chain/include/eosio/chain/authorization_manager.hpp index 9a75b5f80b1..a6df7ad2568 100644 --- a/libraries/chain/include/eosio/chain/authorization_manager.hpp +++ b/libraries/chain/include/eosio/chain/authorization_manager.hpp @@ -84,7 +84,8 @@ namespace eosio { namespace chain { const flat_set& provided_permissions = flat_set(), fc::microseconds provided_delay = fc::microseconds(0), const std::function& checktime = std::function(), - bool allow_unused_keys = false + bool allow_unused_keys = false, + const flat_set& satisfied_authorizations = flat_set() )const; From 8f36168c755fa760a51efc5f7e9ca760dd32bceb Mon Sep 17 00:00:00 2001 From: thaipanda <45444502+Thaipanda@users.noreply.github.com> Date: Mon, 17 Dec 2018 11:29:41 +0800 Subject: [PATCH 09/21] release v1.0.3 to master (#15) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * boscore basic improvement (#2) * kafka_plugin code * Automatic installation librdkafka/cppkafka * Feature/ci * Feature/48 kafka plugin * add CMakeModules/FindCppkafka.cmake * Production of block in time zone sequence * P2p self discovery * P2p self discovery * add notify_plugin * add api "get_block_detail" * add free res limit and blklst code * update free res limit and blklst code * update res code * update unittest code * revert submodule version * code typo * update blklist code * update sync name list db object error code * update code * update index code * Feature/5 ramdom * Revert "Merge branch 'feature/5-ramdom' into 'develop'" This reverts merge request !8 * adjust for setup BOSCore * change description * adjust the kafka plugin dependency be more special * use boscore repository to improve security * change version tag * finish for docker/builder * pass to build docker and update readme * add actionseed, global action sequence (#5) * delete renamed old file * BOSCore v1.0.1-1.4.3 * restructure the version schema * fix __gmpn_set_str error when build bos.contract * prepare for 
the v1.0.1 * finish BOS basic functions * add README files * update info * Release/1.0.x (#11) * boscore basic improvement (#2) * kafka_plugin code * Automatic installation librdkafka/cppkafka * Feature/ci * Feature/48 kafka plugin * add CMakeModules/FindCppkafka.cmake * Production of block in time zone sequence * P2p self discovery * P2p self discovery * add notify_plugin * add api "get_block_detail" * add free res limit and blklst code * update free res limit and blklst code * update res code * update unittest code * revert submodule version * code typo * update blklist code * update sync name list db object error code * update code * update index code * Feature/5 ramdom * Revert "Merge branch 'feature/5-ramdom' into 'develop'" This reverts merge request !8 * adjust for setup BOSCore * change description * adjust the kafka plugin dependency be more special * use boscore repository to improve security * change version tag * finish for docker/builder * pass to build docker and update readme * add actionseed, global action sequence (#5) * delete renamed old file * BOSCore v1.0.1-1.4.3 * restructure the version schema * fix __gmpn_set_str error when build bos.contract * prepare for the v1.0.1 * add README files * update info * readme for kafka & add time for action (#5) * Restarting the node invalidates the blacklist, fixes #7 (#8) * restart sync list db * recovery system account bos to eosio * recovery system account bos to eosio * recovery system account bos to eosio * Fix/#3 notify plugin (#10) * Add debug info * comment log * rm log for notify_plugin * prepare for v1.0.2 * merge v1.0.2 (#13) * boscore basic improvement (#2) * kafka_plugin code * Automatic installation librdkafka/cppkafka * Feature/ci * Feature/48 kafka plugin * add CMakeModules/FindCppkafka.cmake * Production of block in time zone sequence * P2p self discovery * P2p self discovery * add notify_plugin * add api "get_block_detail" * add free res limit and blklst code * update free res limit and blklst code * update res code * update unittest code * revert submodule version * code typo * update blklist code * update sync name list db object error code * update code * update index code * Feature/5 ramdom * Revert "Merge branch 'feature/5-ramdom' into 'develop'" This reverts merge request !8 * adjust for setup BOSCore * change description * adjust the kafka plugin dependency be more special * use boscore repository to improve security * change version tag * finish for docker/builder * pass to build docker and update readme * add actionseed, global action sequence (#5) * delete renamed old file * BOSCore v1.0.1-1.4.3 * restructure the version schema * fix __gmpn_set_str error when build bos.contract * prepare for the v1.0.1 * add README files * update info * prepare for v1.0.2 * patch the EOSIO 1.5.1 security bug fixes * prepare for v1.0.3 * adjust the slogan --- CMakeLists.txt | 2 +- Docker/README.md | 4 +- README.md | 4 +- README_CN.md | 2 +- libraries/chain/apply_context.cpp | 108 ++++++++++++++---- libraries/chain/authorization_manager.cpp | 11 +- .../eosio/chain/authorization_manager.hpp | 3 +- 7 files changed, 100 insertions(+), 34 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt index 1451955870c..0309bb0b5e4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -35,7 +35,7 @@ set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) set(VERSION_MINOR 0) -set(VERSION_PATCH 2) +set(VERSION_PATCH 3) set( CLI_CLIENT_EXECUTABLE_NAME cleos ) set( NODE_EXECUTABLE_NAME nodeos )
diff --git a/Docker/README.md b/Docker/README.md index 9ff75404b7d..ffdfe07e2c5
100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -20,10 +20,10 @@ cd bos/Docker docker build . -t boscore/bos ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.0.2 tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.0.3 tag, you could do the following: ```bash -docker build -t boscore/bos:v1.0.2 --build-arg branch=v1.0.2 . +docker build -t boscore/bos:v1.0.3 --build-arg branch=v1.0.3 . ``` By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image. diff --git a/README.md b/README.md index 43fcdfa2d61..eecfaf7ec1c 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -# BOSCore - Born for DApp, be more useable. +# BOSCore - Born for DApps. Born for Usability. -## BOSCore Version: v1.0.2 +## BOSCore Version: v1.0.3 ### Basic EOSIO Version: v1.4.4 # Background diff --git a/README_CN.md b/README_CN.md index d4701484fb9..a1f55247bcf 100644 --- a/README_CN.md +++ b/README_CN.md @@ -1,6 +1,6 @@ # BOSCore - 更可用的链,为DApp而生。 -## BOSCore Version: v1.0.2 +## BOSCore Version: v1.0.3 ### Basic EOSIO Version: v1.4.4 # 背景 diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 07ab384eeff..4e9807ed85c 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -209,6 +209,15 @@ void apply_context::execute_inline( action&& a ) { EOS_ASSERT( code != nullptr, action_validate_exception, "inline action's code account ${account} does not exist", ("account", a.account) ); + bool disallow_send_to_self_bypass = false; // eventually set to whether the appropriate protocol feature has been activated + bool send_to_self = (a.account == receiver); + bool inherit_parent_authorizations = (!disallow_send_to_self_bypass && send_to_self && (receiver == act.account) && control.is_producing_block()); + + flat_set inherited_authorizations; + if( inherit_parent_authorizations ) { + inherited_authorizations.reserve( a.authorization.size() ); + } + for( const auto& auth : a.authorization ) { auto* actor = control.db().find(auth.actor); EOS_ASSERT( actor != nullptr, action_validate_exception, @@ -216,22 +225,45 @@ void apply_context::execute_inline( action&& a ) { EOS_ASSERT( control.get_authorization_manager().find_permission(auth) != nullptr, action_validate_exception, "inline action's authorizations include a non-existent permission: ${permission}", ("permission", auth) ); + + if( inherit_parent_authorizations && std::find(act.authorization.begin(), act.authorization.end(), auth) != act.authorization.end() ) { + inherited_authorizations.insert( auth ); + } } - // No need to check authorization if: replaying irreversible blocks; contract is privileged; or, contract is calling itself. 
- if( !control.skip_auth_check() && !privileged && a.account != receiver ) { - control.get_authorization_manager() - .check_authorization( {a}, - {}, - {{receiver, config::eosio_code_name}}, - control.pending_block_time() - trx_context.published, - std::bind(&transaction_context::checktime, &this->trx_context), - false - ); - - //QUESTION: Is it smart to allow a deferred transaction that has been delayed for some time to get away - // with sending an inline action that requires a delay even though the decision to send that inline - // action was made at the moment the deferred transaction was executed with potentially no forewarning? + // No need to check authorization if replaying irreversible blocks or contract is privileged + if( !control.skip_auth_check() && !privileged ) { + try { + control.get_authorization_manager() + .check_authorization( {a}, + {}, + {{receiver, config::eosio_code_name}}, + control.pending_block_time() - trx_context.published, + std::bind(&transaction_context::checktime, &this->trx_context), + false, + inherited_authorizations + ); + + //QUESTION: Is it smart to allow a deferred transaction that has been delayed for some time to get away + // with sending an inline action that requires a delay even though the decision to send that inline + // action was made at the moment the deferred transaction was executed with potentially no forewarning? + } catch( const fc::exception& e ) { + if( disallow_send_to_self_bypass || !send_to_self ) { + throw; + } else if( control.is_producing_block() ) { + subjective_block_production_exception new_exception(FC_LOG_MESSAGE( error, "Authorization failure with inline action sent to self")); + for (const auto& log: e.get_log()) { + new_exception.append_log(log); + } + throw new_exception; + } + } catch( ... ) { + if( disallow_send_to_self_bypass || !send_to_self ) { + throw; + } else if( control.is_producing_block() ) { + EOS_THROW(subjective_block_production_exception, "Unexpected exception occurred validating inline action sent to self"); + } + } } _inline_actions.emplace_back( move(a) ); @@ -268,16 +300,30 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a require_authorization(payer); /// uses payer's storage } - // if a contract is deferring only actions to itself then there is no need - // to check permissions, it could have done everything anyway. - bool check_auth = false; - for( const auto& act : trx.actions ) { - if( act.account != receiver ) { - check_auth = true; - break; + // Originally this code bypassed authorization checks if a contract was deferring only actions to itself. + // The idea was that the code could already do whatever the deferred transaction could do, so there was no point in checking authorizations. + // But this is not true. The original implementation didn't validate the authorizations on the actions which allowed for privilege escalation. + // It would make it possible to bill RAM to some unrelated account. + // Furthermore, even if the authorizations were forced to be a subset of the current action's authorizations, it would still violate the expectations + // of the signers of the original transaction, because the deferred transaction would allow billing more CPU and network bandwidth than the maximum limit + // specified on the original transaction. + // So, the deferred transaction must always go through the authorization checking if it is not sent by a privileged contract. 
+ // However, the old logic must still be considered because it cannot objectively change until a consensus protocol upgrade. + + bool disallow_send_to_self_bypass = false; // eventually set to whether the appropriate protocol feature has been activated + + auto is_sending_only_to_self = [&trx]( const account_name& self ) { + bool send_to_self = true; + for( const auto& act : trx.actions ) { + if( act.account != self ) { + send_to_self = false; + break; + } } - } - if( check_auth ) { + return send_to_self; + }; + + try { control.get_authorization_manager() .check_authorization( trx.actions, {}, @@ -286,6 +332,22 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a std::bind(&transaction_context::checktime, &this->trx_context), false ); + } catch( const fc::exception& e ) { + if( disallow_send_to_self_bypass || !is_sending_only_to_self(receiver) ) { + throw; + } else if( control.is_producing_block() ) { + subjective_block_production_exception new_exception(FC_LOG_MESSAGE( error, "Authorization failure with sent deferred transaction consisting only of actions to self")); + for (const auto& log: e.get_log()) { + new_exception.append_log(log); + } + throw new_exception; + } + } catch( ... ) { + if( disallow_send_to_self_bypass || !is_sending_only_to_self(receiver) ) { + throw; + } else if( control.is_producing_block() ) { + EOS_THROW(subjective_block_production_exception, "Unexpected exception occurred validating sent deferred transaction consisting only of actions to self"); + } } } diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index 832f69c71cd..6725468cf97 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -431,7 +431,8 @@ namespace eosio { namespace chain { const flat_set& provided_permissions, fc::microseconds provided_delay, const std::function& _checktime, - bool allow_unused_keys + bool allow_unused_keys, + const flat_set& satisfied_authorizations )const { const auto& checktime = ( static_cast(_checktime) ? 
_checktime : _noop_checktime ); @@ -488,9 +489,11 @@ namespace eosio { namespace chain { } } - auto res = permissions_to_satisfy.emplace( declared_auth, delay ); - if( !res.second && res.first->second > delay) { // if the declared_auth was already in the map and with a higher delay - res.first->second = delay; + if( satisfied_authorizations.find( declared_auth ) == satisfied_authorizations.end() ) { + auto res = permissions_to_satisfy.emplace( declared_auth, delay ); + if( !res.second && res.first->second > delay) { // if the declared_auth was already in the map and with a higher delay + res.first->second = delay; + } } } } diff --git a/libraries/chain/include/eosio/chain/authorization_manager.hpp b/libraries/chain/include/eosio/chain/authorization_manager.hpp index 9a75b5f80b1..a6df7ad2568 100644 --- a/libraries/chain/include/eosio/chain/authorization_manager.hpp +++ b/libraries/chain/include/eosio/chain/authorization_manager.hpp @@ -84,7 +84,8 @@ namespace eosio { namespace chain { const flat_set& provided_permissions = flat_set(), fc::microseconds provided_delay = fc::microseconds(0), const std::function& checktime = std::function(), - bool allow_unused_keys = false + bool allow_unused_keys = false, + const flat_set& satisfied_authorizations = flat_set() )const; From 341d4104a8d0fd86171817d84dc13163ad31abad Mon Sep 17 00:00:00 2001 From: Wirte Code <45449488+wirtecode@users.noreply.github.com> Date: Tue, 18 Dec 2018 16:48:03 +0800 Subject: [PATCH 10/21] has_contract() : Determine whether to deploy the contract (#18) --- contracts/eosiolib/transaction.h | 7 +++++++ libraries/chain/wasm_interface.cpp | 7 +++++++ unittests/actiondemo/actiondemo.abi | 22 +++++++++++++++++++++- unittests/actiondemo/actiondemo.cpp | 12 +++++++++++- unittests/actiondemo/actiondemo.hpp | 6 ++++++ unittests/actiondemo/test.py | 8 ++++++++ 6 files changed, 60 insertions(+), 2 deletions(-) diff --git a/contracts/eosiolib/transaction.h b/contracts/eosiolib/transaction.h index db115ca27e1..d84e4dbd604 100644 --- a/contracts/eosiolib/transaction.h +++ b/contracts/eosiolib/transaction.h @@ -108,6 +108,13 @@ extern "C" { */ void get_action_sequence(uint64_t* seq); + /** + * Get the code_hash of the account + * @param name : account name + * @return : Return has code + */ + bool has_contract( account_name name); + /** * Get the producer's signature for the action * @param sig : Memory buffer diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index f63d836d377..ca896f3ef95 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -1406,6 +1406,12 @@ class context_free_transaction_api : public context_aware_api { seq = context.global_action_sequence; } + bool has_contract(account_name name){ + const auto accnt = context.db.find( name ); + EOS_ASSERT( accnt != nullptr, action_validate_exception, "account '${account}' does not exist", ("account", name) ); + return accnt->code.size() > 0; + } + int expiration() { return context.trx_context.trx.expiration.sec_since_epoch(); } @@ -1946,6 +1952,7 @@ REGISTER_INTRINSICS(context_free_transaction_api, (transaction_size, int() ) (get_transaction_id, void(int) ) (get_action_sequence, void(int) ) + (has_contract, int(int64_t) ) (expiration, int() ) (tapos_block_prefix, int() ) (tapos_block_num, int() ) diff --git a/unittests/actiondemo/actiondemo.abi b/unittests/actiondemo/actiondemo.abi index 172d180bfef..b2504bd4fd9 100644 --- a/unittests/actiondemo/actiondemo.abi +++ b/unittests/actiondemo/actiondemo.abi @@ -1,5 
+1,5 @@ { - "____comment": "This file was generated by eosio-abigen. DO NOT EDIT - 2018-11-17T13:26:02", + "____comment": "This file was generated by eosio-abigen. DO NOT EDIT - 2018-12-18T07:34:45", "version": "eosio::abi/1.0", "types": [], "structs": [{ @@ -45,6 +45,22 @@ "name": "clear", "base": "", "fields": [] + },{ + "name": "args_name", + "base": "", + "fields": [{ + "name": "name", + "type": "name" + } + ] + },{ + "name": "printcode", + "base": "", + "fields": [{ + "name": "t", + "type": "args_name" + } + ] },{ "name": "args_inline", "base": "", @@ -74,6 +90,10 @@ "name": "clear", "type": "clear", "ricardian_contract": "" + },{ + "name": "printcode", + "type": "printcode", + "ricardian_contract": "" },{ "name": "inlineact", "type": "inlineact", diff --git a/unittests/actiondemo/actiondemo.cpp b/unittests/actiondemo/actiondemo.cpp index 3f8a3fcb6e0..3e596af6571 100644 --- a/unittests/actiondemo/actiondemo.cpp +++ b/unittests/actiondemo/actiondemo.cpp @@ -1,7 +1,7 @@ #include "actiondemo.hpp" #include "../../contracts/eosiolib/print.hpp" #include "../../contracts/eosiolib/types.hpp" -#include "../../contracts/eosiolib/transaction.hpp" +#include "../../contracts/eosiolib/transaction.h" namespace spaceaction { @@ -19,6 +19,9 @@ namespace spaceaction { case N(clear): clear(); return; + case N(printcode): + printcode(unpack_action_data()); + return; } } @@ -33,6 +36,8 @@ namespace spaceaction { } } + + std::string to_hex( const char* d, uint32_t s ) { std::string r; @@ -43,6 +48,11 @@ namespace spaceaction { return r; } + void actiondemo::printcode(const args_name& t){ + bool r = has_contract(t.name); + print_f("% code hash:%", name{t.name}.to_string(),r); + } + void actiondemo::generate(const args& t){ for (int i = 0; i < t.loop; ++i) { transaction_id_type txid; diff --git a/unittests/actiondemo/actiondemo.hpp b/unittests/actiondemo/actiondemo.hpp index e1d5031bfa5..626ff588b66 100644 --- a/unittests/actiondemo/actiondemo.hpp +++ b/unittests/actiondemo/actiondemo.hpp @@ -22,6 +22,12 @@ namespace spaceaction { //@abi action void clear(); + struct args_name{ + account_name name; + }; + //@abi action + void printcode(const args_name& t); + struct args_inline{ account_name payer; diff --git a/unittests/actiondemo/test.py b/unittests/actiondemo/test.py index 5ced2b4276c..a03fccaaa30 100644 --- a/unittests/actiondemo/test.py +++ b/unittests/actiondemo/test.py @@ -159,6 +159,13 @@ def stepGenerate(): run(args.cleos + 'get table %s %s seedobjs' %(args.contract2, args.contract2) ) print ("sleep 5") +def stepGetCode(): + print ("=========================== set stepGetCode ===========================" ) + run(args.cleos + 'push action %s printcode \'[{"name":"eosio.token"}]\' -p %s ' %(args.contract,args.contract)) + run(args.cleos + 'push action %s printcode \'[{"name":"eosio"}]\' -p %s ' %(args.contract,args.contract)) + run(args.cleos + 'push action %s printcode \'[{"name":"eosio.ram"}]\' -p %s ' %(args.contract,args.contract)) + print ("sleep 5") + parser = argparse.ArgumentParser() @@ -169,6 +176,7 @@ def stepGenerate(): ('i', 'init', stepInitCaee, True, "stepInitCaee"), ('c', 'clear', stepClear, True, "stepInitCaee"), ('g', 'generate', stepGenerate, True, "stepInitCaee"), + ('d', 'getcode', stepGetCode, True, "stepGetCode"), ] parser.add_argument('--public-key', metavar='', help="EOSIO Public Key", default='EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV', dest="public_key") From 52d0745cec32226e2368f48195cd17ed1ffb9bce Mon Sep 17 00:00:00 2001 From: Wirte Code 
<45449488+wirtecode@users.noreply.github.com> Date: Tue, 18 Dec 2018 19:39:06 +0800 Subject: [PATCH 11/21] Fix comments and function names (#19) --- contracts/eosiolib/transaction.h | 4 ++-- unittests/actiondemo/actiondemo.abi | 8 ++++---- unittests/actiondemo/actiondemo.cpp | 8 ++++---- unittests/actiondemo/actiondemo.hpp | 2 +- unittests/actiondemo/test.py | 6 +++--- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/contracts/eosiolib/transaction.h b/contracts/eosiolib/transaction.h index d84e4dbd604..c8d31899a7d 100644 --- a/contracts/eosiolib/transaction.h +++ b/contracts/eosiolib/transaction.h @@ -109,9 +109,9 @@ extern "C" { void get_action_sequence(uint64_t* seq); /** - * Get the code_hash of the account + * Tests if the account has an installed contract * @param name : account name - * @return : Return has code + * @return : Return has contract */ bool has_contract( account_name name); diff --git a/unittests/actiondemo/actiondemo.abi b/unittests/actiondemo/actiondemo.abi index b2504bd4fd9..167314a5d24 100644 --- a/unittests/actiondemo/actiondemo.abi +++ b/unittests/actiondemo/actiondemo.abi @@ -1,5 +1,5 @@ { - "____comment": "This file was generated by eosio-abigen. DO NOT EDIT - 2018-12-18T07:34:45", + "____comment": "This file was generated by eosio-abigen. DO NOT EDIT - 2018-12-18T11:11:23", "version": "eosio::abi/1.0", "types": [], "structs": [{ @@ -54,7 +54,7 @@ } ] },{ - "name": "printcode", + "name": "hascontract", "base": "", "fields": [{ "name": "t", @@ -91,8 +91,8 @@ "type": "clear", "ricardian_contract": "" },{ - "name": "printcode", - "type": "printcode", + "name": "hascontract", + "type": "hascontract", "ricardian_contract": "" },{ "name": "inlineact", diff --git a/unittests/actiondemo/actiondemo.cpp b/unittests/actiondemo/actiondemo.cpp index 3e596af6571..73cda150dc8 100644 --- a/unittests/actiondemo/actiondemo.cpp +++ b/unittests/actiondemo/actiondemo.cpp @@ -19,8 +19,8 @@ namespace spaceaction { case N(clear): clear(); return; - case N(printcode): - printcode(unpack_action_data()); + case N(hascontract): + hascontract(unpack_action_data()); return; } } @@ -48,9 +48,9 @@ namespace spaceaction { return r; } - void actiondemo::printcode(const args_name& t){ + void actiondemo::hascontract(const args_name& t){ bool r = has_contract(t.name); - print_f("% code hash:%", name{t.name}.to_string(),r); + print_f("% has_contract:%", name{t.name}.to_string(),r); } void actiondemo::generate(const args& t){ diff --git a/unittests/actiondemo/actiondemo.hpp b/unittests/actiondemo/actiondemo.hpp index 626ff588b66..2008eb17e6c 100644 --- a/unittests/actiondemo/actiondemo.hpp +++ b/unittests/actiondemo/actiondemo.hpp @@ -26,7 +26,7 @@ namespace spaceaction { account_name name; }; //@abi action - void printcode(const args_name& t); + void hascontract(const args_name& t); struct args_inline{ diff --git a/unittests/actiondemo/test.py b/unittests/actiondemo/test.py index a03fccaaa30..6627374173c 100644 --- a/unittests/actiondemo/test.py +++ b/unittests/actiondemo/test.py @@ -161,9 +161,9 @@ def stepGenerate(): def stepGetCode(): print ("=========================== set stepGetCode ===========================" ) - run(args.cleos + 'push action %s printcode \'[{"name":"eosio.token"}]\' -p %s ' %(args.contract,args.contract)) - run(args.cleos + 'push action %s printcode \'[{"name":"eosio"}]\' -p %s ' %(args.contract,args.contract)) - run(args.cleos + 'push action %s printcode \'[{"name":"eosio.ram"}]\' -p %s ' %(args.contract,args.contract)) + run(args.cleos + 'push action %s 
hascontract \'[{"name":"eosio.token"}]\' -p %s ' %(args.contract,args.contract)) + run(args.cleos + 'push action %s hascontract \'[{"name":"eosio"}]\' -p %s ' %(args.contract,args.contract)) + run(args.cleos + 'push action %s hascontract \'[{"name":"eosio.ram"}]\' -p %s ' %(args.contract,args.contract)) print ("sleep 5") From c6f4a42c59454e65f9a40465b89875275a84a742 Mon Sep 17 00:00:00 2001 From: flyxl Date: Mon, 7 Jan 2019 21:24:06 +0800 Subject: [PATCH 12/21] fix tag version typo of cppkafka (#24) --- scripts/eosio_build_centos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index bedf64f6058..06fff117032 100644 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -765,7 +765,7 @@ mongodconf exit 1; fi fi - if ! git clone --depth 1 -b v0.2 https://github.com/boscore/cppkafka.git + if ! git clone --depth 1 -b 0.2 https://github.com/boscore/cppkafka.git then printf "\\tUnable to clone cppkafka repo.\\n" printf "\\n\\tExiting now.\\n" From 66e545c08bf5ad9fa6f9627a77b1af7af46619f8 Mon Sep 17 00:00:00 2001 From: Wirte Code <45449488+wirtecode@users.noreply.github.com> Date: Tue, 8 Jan 2019 14:55:53 +0800 Subject: [PATCH 13/21] get_contract_code (#26) --- contracts/eosiolib/transaction.h | 7 +++++++ libraries/chain/wasm_interface.cpp | 12 ++++++++++++ unittests/actiondemo/actiondemo.abi | 2 +- unittests/actiondemo/actiondemo.cpp | 9 +++++++++ unittests/actiondemo/test.py | 6 ++++-- 5 files changed, 33 insertions(+), 3 deletions(-) diff --git a/contracts/eosiolib/transaction.h b/contracts/eosiolib/transaction.h index c8d31899a7d..95a41322acc 100644 --- a/contracts/eosiolib/transaction.h +++ b/contracts/eosiolib/transaction.h @@ -115,6 +115,13 @@ extern "C" { */ bool has_contract( account_name name); + /** + * Get the code of the deployment contract + * @param name : account name + * @param code : return contract code + */ + void get_contract_code( account_name name, checksum256* code); + /** * Get the producer's signature for the action * @param sig : Memory buffer diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index ca896f3ef95..12ff329bd29 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -1412,6 +1412,17 @@ class context_free_transaction_api : public context_aware_api { return accnt->code.size() > 0; } + void get_contract_code(account_name name, fc::sha256& code ) { + const auto accnt = context.db.find( name ); + EOS_ASSERT( accnt != nullptr, action_validate_exception, "account '${account}' does not exist", ("account", name) ); + + if( accnt->code.size() > 0) { + code = fc::sha256::hash( accnt->code.data(), accnt->code.size() ); + } else { + code = fc::sha256(); + } + } + int expiration() { return context.trx_context.trx.expiration.sec_since_epoch(); } @@ -1953,6 +1964,7 @@ REGISTER_INTRINSICS(context_free_transaction_api, (get_transaction_id, void(int) ) (get_action_sequence, void(int) ) (has_contract, int(int64_t) ) + (get_contract_code, void(int64_t, int) ) (expiration, int() ) (tapos_block_prefix, int() ) (tapos_block_num, int() ) diff --git a/unittests/actiondemo/actiondemo.abi b/unittests/actiondemo/actiondemo.abi index 167314a5d24..eb3c15c7c13 100644 --- a/unittests/actiondemo/actiondemo.abi +++ b/unittests/actiondemo/actiondemo.abi @@ -1,5 +1,5 @@ { - "____comment": "This file was generated by eosio-abigen. DO NOT EDIT - 2018-12-18T11:11:23", + "____comment": "This file was generated by eosio-abigen. 
DO NOT EDIT - 2019-01-07T10:42:22", "version": "eosio::abi/1.0", "types": [], "structs": [{ diff --git a/unittests/actiondemo/actiondemo.cpp b/unittests/actiondemo/actiondemo.cpp index 73cda150dc8..5dc57603d73 100644 --- a/unittests/actiondemo/actiondemo.cpp +++ b/unittests/actiondemo/actiondemo.cpp @@ -51,6 +51,15 @@ namespace spaceaction { void actiondemo::hascontract(const args_name& t){ bool r = has_contract(t.name); print_f("% has_contract:%", name{t.name}.to_string(),r); + +// if (r) { + checksum256 code; + get_contract_code(t.name, &code); + + std::string s = to_hex((char*)&code.hash, 32); + print_f("% contract_code:%", name{t.name}.to_string(),s); +// } + } void actiondemo::generate(const args& t){ diff --git a/unittests/actiondemo/test.py b/unittests/actiondemo/test.py index 6627374173c..e2cb65e06b7 100644 --- a/unittests/actiondemo/test.py +++ b/unittests/actiondemo/test.py @@ -90,7 +90,7 @@ def stepStartWallet(): importKeys() # run('rm -rf ~/.local/share/eosio/nodeos/data ') run("rm -rf ./data/*") - background(args.nodeos + ' -e -p eosio --blocks-dir ./data/block/ --genesis-json %s --config-dir ./ --data-dir ./data/ --plugin eosio::http_plugin --plugin eosio::chain_api_plugin --plugin eosio::producer_plugin --plugin eosio::history_api_plugin> eos.log 2>&1 &' % args.genesis) + background(args.nodeos + ' -e -p eosio --blocks-dir ./data/block/ --genesis-json %s --config-dir ./ --data-dir ./data/ --plugin eosio::http_plugin --plugin eosio::chain_api_plugin --plugin eosio::producer_plugin --plugin eosio::history_api_plugin --plugin eosio::history_plugin> eos.log 2>&1 &' % args.genesis) run("rm -rf ./data2/*") background(args.nodeos + ' --blocks-dir ./data2/block/ --genesis-json %s --data-dir ./data2/ --config-dir ./ --p2p-peer-address 127.0.0.1:9876 --http-server-address 0.0.0.0:8001 --p2p-listen-endpoint 0.0.0.0:9001 --plugin eosio::http_plugin --plugin eosio::chain_api_plugin --plugin eosio::producer_plugin --plugin eosio::history_api_plugin > eos2.log 2>&1 &' % args.genesis) sleep(30) @@ -103,6 +103,7 @@ def createAccounts(): run(args.cleos + 'set contract eosio.msig ' + args.contracts_dir + 'eosio.msig/') run(args.cleos + 'push action eosio.token create \'["eosio", "10000000000.0000 %s"]\' -p eosio.token' % (args.symbol)) run(args.cleos + 'push action eosio.token issue \'["eosio", "%s %s", "memo"]\' -p eosio' % ("1000000.0000", args.symbol)) + run(args.cleos + 'push action eosio.token issue \'["%s", "%s %s", "memo"]\' -p eosio' % (args.contract, "1000.0000", args.symbol)) retry(args.cleos + 'set contract eosio ' + args.contracts_dir + 'eosio.system/ -p eosio') sleep(1) run(args.cleos + 'push action eosio setpriv' + jsonArg(['eosio.msig', 1]) + '-p eosio@active') @@ -164,6 +165,7 @@ def stepGetCode(): run(args.cleos + 'push action %s hascontract \'[{"name":"eosio.token"}]\' -p %s ' %(args.contract,args.contract)) run(args.cleos + 'push action %s hascontract \'[{"name":"eosio"}]\' -p %s ' %(args.contract,args.contract)) run(args.cleos + 'push action %s hascontract \'[{"name":"eosio.ram"}]\' -p %s ' %(args.contract,args.contract)) + run(args.cleos + 'push action %s hascontract \'[{"name":"caeeregright"}]\' -p %s ' %(args.contract,args.contract)) print ("sleep 5") @@ -211,7 +213,7 @@ def stepGetCode(): accnum = 26 -accounts = [] +accounts = ['caeeregright'] # for i in range(97,97+accnum): # accounts.append("user%c"% chr(i)) # accounts.append("payman") From 98996385f886e285cf30b46d301648a9163ab73c Mon Sep 17 00:00:00 2001 From: vlbos <45447465+vlbos@users.noreply.github.com> Date: 
Tue, 8 Jan 2019 14:58:23 +0800 Subject: [PATCH 14/21] fixed #20: Using greylist-account while starting nodeos for the first time throws exception (#23) * restart sync list db * recovery system account bos to eosio * catch exception plugin initialize sync list before initialize database --- libraries/chain/controller.cpp | 15 ++++++++---- unittests/database_gmr_blklst_tests.cpp | 31 ++++++++++++++----------- 2 files changed, 29 insertions(+), 17 deletions(-)
diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index cb824cf45ba..dd5a85ba2df 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -741,10 +741,17 @@ struct controller_impl { void sync_name_list(list_type list,bool isMerge=false) { - const auto &gpo2 = db.get(); - db.modify(gpo2, [&](auto &gprops2) { - sync_list_and_db(list, gprops2,isMerge); - }); + try + { + const auto &gpo2 = db.get(); + db.modify(gpo2, [&](auto &gprops2) { + sync_list_and_db(list, gprops2, isMerge); + }); + } + catch (...) + { + wlog("plugin initialize sync list ignore before initialize database"); + } } // "bos end"
diff --git a/unittests/database_gmr_blklst_tests.cpp b/unittests/database_gmr_blklst_tests.cpp index f448ba5a172..148a7d7bace 100644 --- a/unittests/database_gmr_blklst_tests.cpp +++ b/unittests/database_gmr_blklst_tests.cpp @@ -73,20 +73,25 @@ BOOST_AUTO_TEST_CASE(set_name_list_test) vector list = parse_list_string(str); flat_set nameset(list.begin(), list.end()); + // Create an account + db.create([](account_object &a) { + a.name = "alice"; + }); - test.control->set_actor_blacklist(nameset); + + test.control->add_resource_greylist(N(alice)); // Make sure we can retrieve that account by name const global_property2_object &ptr = db.get(); // Create an account db.modify(ptr, [&](global_property2_object &a) { - a.cfg.actor_blacklist = {N(a)}; - a.cfg.contract_blacklist = {N(a)}; - a.cfg.resource_greylist = {N(a)}; + // a.cfg.actor_blacklist = {N(a)}; + // a.cfg.contract_blacklist = {N(a)}; + // a.cfg.resource_greylist = {N(a)}; }); - int64_t lt = static_cast(list_type::actor_blacklist_type); + int64_t lt = static_cast(list_type::resource_greylist_type); int64_t lat = static_cast(list_action_type::insert_type); test.control->set_name_list(lt, lat, list); @@ -114,24 +119,24 @@ BOOST_AUTO_TEST_CASE(set_name_list_test) const global_property2_object &ptr1 = db.get(); chain_config2 c = ptr1.cfg; - BOOST_TEST(c.actor_blacklist.size() == 4); - BOOST_TEST(ab.size() == 4); + BOOST_TEST(c.resource_greylist.size() == 1); + BOOST_TEST(rg.size() == 1); convert_names(c.actor_blacklist, aab); convert_names(c.contract_blacklist, acb); convert_names(c.resource_greylist, arg); - if (c.actor_blacklist.size() == 4) + if (c.resource_greylist.size() == 1) { - bool b = (aab.find(N(a)) != aab.end()); - BOOST_TEST(b); + // bool b = (aab.find(N(a)) != aab.end()); + // BOOST_TEST(b); } - bool d = ab.find(N(a)) != ab.end(); - BOOST_TEST(d); - bool m = aab.find(N(alice)) != aab.end(); + // bool d = ab.find(N(a)) != ab.end(); + // BOOST_TEST(d); + bool m = arg.find(N(alice)) != arg.end(); BOOST_TEST(m); // Undo creation of the account
From 2f2640305bc4344681fd0ce66db0aee62efb4c88 Mon Sep 17 00:00:00 2001 From: vlbos <45447465+vlbos@users.noreply.github.com> Date: Thu, 10 Jan 2019 15:46:40 +0800 Subject: [PATCH 15/21] fixed 'cleos system bidname info' cmd exec return could not parse uint64_t (#30) * recovery system account bos to eosio * catch exception plugin initialize sync list before initialize database * fixed bidnameinfo
could not parse uint64_t --- programs/cleos/main.cpp | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 7124357f158..af70e0683b8 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -1285,33 +1285,38 @@ struct bidname_subcommand { struct bidname_info_subcommand { bool print_json = false; - string newname_str; + name newname; bidname_info_subcommand(CLI::App* actionRoot) { auto list_producers = actionRoot->add_subcommand("bidnameinfo", localized("Get bidname info")); list_producers->add_flag("--json,-j", print_json, localized("Output in JSON format")); - list_producers->add_option("newname", newname_str, localized("The bidding name"))->required(); + list_producers->add_option("newname", newname, localized("The bidding name"))->required(); list_producers->set_callback([this] { auto rawResult = call(get_table_func, fc::mutable_variant_object("json", true) ("code", "eosio")("scope", "eosio")("table", "namebids") - ("lower_bound", eosio::chain::string_to_name(newname_str.c_str()))("limit", 1)); + ("lower_bound", newname.value)("limit", 1)); if ( print_json ) { std::cout << fc::json::to_pretty_string(rawResult) << std::endl; return; } auto result = rawResult.as(); - if ( result.rows.empty() ) { + // Condition in if statement below can simply be res.rows.empty() when cleos no longer needs to support nodeos versions older than 1.5.0 + if( result.rows.empty() || result.rows[0].get_object()["newname"].as_string() != newname.to_string() ) { std::cout << "No bidname record found" << std::endl; return; } - for ( auto& row : result.rows ) { - fc::time_point time(fc::microseconds(row["last_bid_time"].as_uint64())); - int64_t bid = row["high_bid"].as_int64(); - std::cout << std::left << std::setw(18) << "bidname:" << std::right << std::setw(24) << row["newname"].as_string() << "\n" - << std::left << std::setw(18) << "highest bidder:" << std::right << std::setw(24) << row["high_bidder"].as_string() << "\n" - << std::left << std::setw(18) << "highest bid:" << std::right << std::setw(24) << (bid > 0 ? bid : -bid) << "\n" - << std::left << std::setw(18) << "last bid time:" << std::right << std::setw(24) << ((std::string)time).c_str() << std::endl; - if (bid < 0) std::cout << "This auction has already closed" << std::endl; + const auto& row = result.rows[0]; + string time = row["last_bid_time"].as_string(); + try { + time = (string)fc::time_point(fc::microseconds(to_uint64(time))); + } catch (fc::parse_error_exception&) { } + int64_t bid = row["high_bid"].as_int64(); + std::cout << std::left << std::setw(18) << "bidname:" << std::right << std::setw(24) << row["newname"].as_string() << "\n" + << std::left << std::setw(18) << "highest bidder:" << std::right << std::setw(24) << row["high_bidder"].as_string() << "\n" + << std::left << std::setw(18) << "highest bid:" << std::right << std::setw(24) << (bid > 0 ? 
bid : -bid) << "\n" + << std::left << std::setw(18) << "last bid time:" << std::right << std::setw(24) << time << std::endl; + if (bid < 0) std::cout << "This auction has already closed" << std::endl; + }); } }; From 1aca42970253c0a907a6f9c5ec263dfad5f94c0e Mon Sep 17 00:00:00 2001 From: thaipandada Date: Sun, 13 Jan 2019 15:12:03 +0800 Subject: [PATCH 16/21] prepare for 2.0.1 --- CMakeLists.txt | 4 ++-- Docker/README.md | 6 +++--- README.md | 13 +++++++------ README_CN.md | 11 ++++++----- contracts/eosio.system/delegate_bandwidth.cpp | 2 +- programs/cleos/main.cpp | 7 +------ 6 files changed, 20 insertions(+), 23 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3a6d7a65773..c92f5276c17 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -33,9 +33,9 @@ set( CMAKE_CXX_STANDARD 14 ) set( CMAKE_CXX_EXTENSIONS ON ) set( CXX_STANDARD_REQUIRED ON) -set(VERSION_MAJOR 1) +set(VERSION_MAJOR 2) set(VERSION_MINOR 0) -set(VERSION_PATCH 3) +set(VERSION_PATCH 1) set( CLI_CLIENT_EXECUTABLE_NAME cleos ) diff --git a/Docker/README.md b/Docker/README.md index 6c6b5ed3cc1..582b8936e0d 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -17,13 +17,13 @@ Simple and fast setup of BOSCore on Docker is also available. ```bash git clone https://github.com/boscore/bos.git --recursive --depth 1 cd bos/Docker -docker build . -t boscore/bos +docker build . -t boscore/bos -s BOS ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.0.3 tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v2.0.1 tag, you could do the following: ```bash -docker build -t boscore/bos:v1.0.3 --build-arg branch=v1.0.3 . +docker build -t boscore/bos:v2.0.1 --build-arg branch=v2.0.1 . ``` diff --git a/README.md b/README.md index 57ff18ff0c5..a237aa1cf77 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # BOSCore - Born for DApps. Born for Usability. -## BOSCore Version: v1.0.3 -### Basic EOSIO Version: v1.4.4 +## BOSCore Version: v2.0.1 +### Basic EOSIO Version: v1.5.3-patched # Background The emergence of EOS has brought new imagination to the blockchain. In just a few months since the main network was launched, the version has undergone dozens of upgrades, not only the stability has been greatly improved, but also the new functions have been gradually realized. The node team is also actively involved in building the EOSIO ecosystem. What is even more exciting is that EOS has attracted more and more development teams. There are already hundreds of DApp running on the EOS main network. The transaction volume and circulation market value far exceed Ethereum, and the space for development is growing broader. @@ -20,19 +20,20 @@ As BOS continues to develop, developer rewards will be appropriately adjusted to ## Links 1. [Website](https://boscore.io) -2. [Developer Telegram Group](https://t.me/BOSCoreDev) -3. [WhitePaper](https://github.com/boscore/Documentation/blob/master/BOSCoreTechnicalWhitePaper.md) -4. [白皮书](https://github.com/boscore/Documentation/blob/master/zh-CN/BOSCoreTechnicalWhitePaper.md) +2. [Developer Telegram Group](https://t.me/BOSDevelopers) +3. [Community Telegram Group](https://t.me/boscorecommunity) +4. 
[WhitePaper](https://github.com/boscore/Documentation/blob/master/BOSCoreTechnicalWhitePaper.md) +5. [白皮书](https://github.com/boscore/Documentation/blob/master/zh-CN/BOSCoreTechnicalWhitePaper.md) ## Start 1. Build from code : `bash ./eosio_build.sh -s BOS` 2. Docker Style,check [Docker](./Docker/README.md) + BOSCore bases on EOSIO, so you can also referer: [Getting Started](https://developers.eos.io/eosio-nodeos/docs/overview-1) - [EOSIO Developer Portal](https://developers.eos.io). diff --git a/README_CN.md b/README_CN.md index a1f55247bcf..e8e8d3a9d41 100644 --- a/README_CN.md +++ b/README_CN.md @@ -1,7 +1,7 @@ # BOSCore - 更可用的链,为DApp而生。 -## BOSCore Version: v1.0.3 -### Basic EOSIO Version: v1.4.4 +## BOSCore Version: v2.0.1 +### Basic EOSIO Version: v1.5.3-patched # 背景 EOS的出现给区块链带来了新的想象力,主网启动短短几个月以来,版本经历了几十次升级,不仅稳定性得到了很大提高,并且新功能也逐步实现,各个节点团队也积极参与建设EOSIO生态。让人更加兴奋的是,EOS已经吸引了越来越多的开发团队,当前已经有数百个DApp在EOS主网上面运行,其交易量和流通市值远超以太坊,可发展的空间愈来愈广阔。 @@ -20,9 +20,10 @@ BOS链的代码完全由社区贡献并维护,每个生态参与者都可以 ## 资源 1. [官网](https://boscore.io) -2. [开发者社群](https://t.me/BOSCoreDev) -3. [WhitePaper](https://github.com/boscore/Documentation/blob/master/BOSCoreTechnicalWhitePaper.md) -4. [白皮书](https://github.com/boscore/Documentation/blob/master/zh-CN/BOSCoreTechnicalWhitePaper.md) +2. [Developer Telegram Group](https://t.me/BOSDevelopers) +3. [Community Telegram Group](https://t.me/boscorecommunity) +4. [WhitePaper](https://github.com/boscore/Documentation/blob/master/BOSCoreTechnicalWhitePaper.md) +5. [白皮书](https://github.com/boscore/Documentation/blob/master/zh-CN/BOSCoreTechnicalWhitePaper.md) ## 开始 1. 源码直接编译: `bash ./eosio_build.sh -s BOS` diff --git a/contracts/eosio.system/delegate_bandwidth.cpp b/contracts/eosio.system/delegate_bandwidth.cpp index 95a40781530..fd4e83fcce7 100644 --- a/contracts/eosio.system/delegate_bandwidth.cpp +++ b/contracts/eosio.system/delegate_bandwidth.cpp @@ -205,7 +205,7 @@ namespace eosiosystem { const int64_t max_claimable = 100'000'000'0000ll; const int64_t claimable = int64_t(max_claimable * double(now()-base_time) / (10*seconds_per_year) ); - eosio_assert( max_claimable - claimable <= stake, "bosbosbosbos can only claim their tokens over 10 years" ); + eosio_assert( max_claimable - claimable <= stake, "bos can only claim their tokens over 10 years" ); } void system_contract::changebw( account_name from, account_name receiver, diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index bc318b61991..127c9a965f0 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -952,13 +952,8 @@ struct register_producer_subcommand { register_producer->add_option("account", producer_str, localized("The account to register as a producer"))->required(); register_producer->add_option("producer_key", producer_key_str, localized("The producer's public key"))->required(); register_producer->add_option("url", url, localized("url where info about producer can be found"), true); -<<<<<<< HEAD - register_producer->add_option("location", loc, localized("relative location for purpose of nearest neighbor scheduling"), true); - add_standard_transaction_options(register_producer, "account@active"); -======= register_producer->add_option("location", loc, localized("time zone from -11 to 12 "))->required(); - add_standard_transaction_options(register_producer); ->>>>>>> develop + add_standard_transaction_options(register_producer, "account@active"); register_producer->set_callback([this] { From 68ab2552f4949f2d816348a931cd3f74b1abdfa2 Mon Sep 17 00:00:00 2001 From: thaipandada Date: Sun, 13 
Jan 2019 15:24:17 +0800 Subject: [PATCH 17/21] adjust the location parameter --- programs/cleos/main.cpp | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 127c9a965f0..b8d5a557aac 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -557,15 +557,14 @@ chain::action create_delegate(const name& from, const name& receiver, const asse config::system_account_name, N(delegatebw), act_payload); } -fc::variant regproducer_variant(const account_name& producer, const public_key_type& key, const string& url, const string& location) { -auto _location=atoi(location.c_str()); - FC_ASSERT(_location>-12&&_location<=12,"time zone setting is not legal"); - _location=_location>=0?_location:24+_location; +fc::variant regproducer_variant(const account_name& producer, const public_key_type& key, const string& url, uint16_t location) { + FC_ASSERT(location>-12&&location<=12,"time zone setting is not legal"); + location=location>=0?location:24+location; return fc::mutable_variant_object() ("producer", producer) ("producer_key", key) ("url", url) - ("location", _location) + ("location", location) ; } @@ -945,14 +944,14 @@ struct register_producer_subcommand { string producer_str; string producer_key_str; string url; - string loc; + uint16_t loc = 0; register_producer_subcommand(CLI::App* actionRoot) { auto register_producer = actionRoot->add_subcommand("regproducer", localized("Register a new producer")); register_producer->add_option("account", producer_str, localized("The account to register as a producer"))->required(); register_producer->add_option("producer_key", producer_key_str, localized("The producer's public key"))->required(); register_producer->add_option("url", url, localized("url where info about producer can be found"), true); - register_producer->add_option("location", loc, localized("time zone from -11 to 12 "))->required(); + register_producer->add_option("location", loc, localized("time zone from -11 to 12"), true)->required(); add_standard_transaction_options(register_producer, "account@active");
From 91973994265a4b74533ebfae47149ddcc94c1926 Mon Sep 17 00:00:00 2001 From: thaipandada Date: Sun, 13 Jan 2019 15:37:30 +0800 Subject: [PATCH 18/21] apply 1.5.3 patch --- .../eosio/producer_plugin/producer_plugin.hpp | 1 + plugins/producer_plugin/producer_plugin.cpp | 67 +++++++++++++------ 2 files changed, 48 insertions(+), 20 deletions(-)
diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp index f2e50e92849..7b967421502 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp @@ -23,6 +23,7 @@ class producer_plugin : public appbase::plugin { fc::optional max_irreversible_block_age; fc::optional produce_time_offset_us; fc::optional last_block_time_offset_us; + fc::optional max_scheduled_transaction_time_per_block_ms; fc::optional subjective_cpu_leeway_us; fc::optional incoming_defer_ratio; };
diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 6035547ebc1..bc29cd87ea3 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -136,6 +136,7 @@ class producer_plugin_impl : public std::enable_shared_from_this& weak_this, const block_timestamp_type& current_block_time); }; @@
-526,6 +529,8 @@ void producer_plugin::set_program_options( "offset of non last block producing time in microseconds. Negative number results in blocks to go out sooner, and positive number results in blocks to go out later") ("last-block-time-offset-us", boost::program_options::value()->default_value(0), "offset of last block producing time in microseconds. Negative number results in blocks to go out sooner, and positive number results in blocks to go out later") + ("max-scheduled-transaction-time-per-block-ms", boost::program_options::value()->default_value(100), + "Maximum wall-clock time, in milliseconds, spent retiring scheduled transactions in any block before returning to normal transaction processing.") ("incoming-defer-ratio", bpo::value()->default_value(1.0), "ratio between incoming transations and deferred transactions when both are exhausted") ("snapshots-dir", bpo::value()->default_value("snapshots"), @@ -652,6 +657,8 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ my->_last_block_time_offset_us = options.at("last-block-time-offset-us").as(); + my->_max_scheduled_transaction_time_per_block_ms = options.at("max-scheduled-transaction-time-per-block-ms").as(); + my->_max_transaction_time_ms = options.at("max-transaction-time").as(); my->_max_irreversible_block_age_us = fc::seconds(options.at("max-irreversible-block-age").as()); @@ -803,6 +810,10 @@ void producer_plugin::update_runtime_options(const runtime_options& options) { my->_last_block_time_offset_us = *options.last_block_time_offset_us; } + if (options.max_scheduled_transaction_time_per_block_ms) { + my->_max_scheduled_transaction_time_per_block_ms = *options.max_scheduled_transaction_time_per_block_ms; + } + if (options.incoming_defer_ratio) { my->_incoming_defer_ratio = *options.incoming_defer_ratio; } @@ -824,7 +835,8 @@ producer_plugin::runtime_options producer_plugin::get_runtime_options() const { my->_max_transaction_time_ms, my->_max_irreversible_block_age_us.count() < 0 ? -1 : my->_max_irreversible_block_age_us.count() / 1'000'000, my->_produce_time_offset_us, - my->_last_block_time_offset_us + my->_last_block_time_offset_us, + my->_max_scheduled_transaction_time_per_block_ms }; } @@ -993,6 +1005,11 @@ fc::time_point producer_plugin_impl::calculate_pending_block_time() const { return block_time; } +fc::time_point producer_plugin_impl::calculate_block_deadline( const fc::time_point& block_time ) const { + bool last_block = ((block_timestamp_type(block_time).slot % config::producer_repetitions) == config::producer_repetitions - 1); + return block_time + fc::microseconds(last_block ? 
_last_block_time_offset_us : _produce_time_offset_us); +} + enum class tx_category { PERSISTED, UNEXPIRED_UNPERSISTED, @@ -1000,7 +1017,7 @@ enum class tx_category { }; -producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool &last_block) { +producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { chain::controller& chain = app().get_plugin().chain(); if( chain.get_read_mode() == chain::db_read_mode::READ_ONLY ) @@ -1016,7 +1033,6 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool _pending_block_mode = pending_block_mode::producing; // Not our turn - last_block = ((block_timestamp_type(block_time).slot % config::producer_repetitions) == config::producer_repetitions - 1); const auto& scheduled_producer = hbs->get_scheduled_producer(block_time); auto currrent_watermark_itr = _producer_watermarks.find(scheduled_producer.producer_name); auto signature_provider_itr = _signature_providers.find(scheduled_producer.block_signing_key); @@ -1086,6 +1102,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool const auto& pbs = chain.pending_block_state(); if (pbs) { + const fc::time_point preprocess_deadline = calculate_block_deadline(block_time); if (_pending_block_mode == pending_block_mode::producing && pbs->block_signing_key != scheduled_producer.block_signing_key) { elog("Block Signing Key is not expected value, reverting to speculative mode! [expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.block_signing_key)("actual", pbs->block_signing_key)); @@ -1168,7 +1185,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool int num_processed = 0; for (const auto& trx: apply_trxs) { - if (block_time <= fc::time_point::now()) exhausted = true; + if (preprocess_deadline <= fc::time_point::now()) exhausted = true; if (exhausted) { break; } @@ -1178,9 +1195,9 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool try { auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms); bool deadline_is_subjective = false; - if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && block_time < deadline)) { + if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && preprocess_deadline < deadline)) { deadline_is_subjective = true; - deadline = block_time; + deadline = preprocess_deadline; } auto trace = chain.push_transaction(trx, deadline); @@ -1233,8 +1250,16 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool int num_failed = 0; int num_processed = 0; + auto scheduled_trx_deadline = preprocess_deadline; + if (_max_scheduled_transaction_time_per_block_ms >= 0) { + scheduled_trx_deadline = std::min( + scheduled_trx_deadline, + fc::time_point::now() + fc::milliseconds(_max_scheduled_transaction_time_per_block_ms) + ); + } + for (const auto& trx : scheduled_trxs) { - if (block_time <= fc::time_point::now()) exhausted = true; + if (scheduled_trx_deadline <= fc::time_point::now()) exhausted = true; if (exhausted) { break; } @@ -1243,6 +1268,8 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool // configurable ratio of incoming txns vs deferred txns while (_incoming_trx_weight >= 1.0 && orig_pending_txn_size && _pending_incoming_transactions.size()) { + if (scheduled_trx_deadline <= fc::time_point::now()) break; + auto e = _pending_incoming_transactions.front(); 
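// Illustration only, not part of this patch: with the default value of 100 for
// --max-scheduled-transaction-time-per-block-ms, the deadline guarding this loop is
//    scheduled_trx_deadline = std::min( preprocess_deadline,
//                                       fc::time_point::now() + fc::milliseconds(100) );
// so retiring scheduled (deferred) transactions can consume at most roughly 100 ms of
// wall-clock time per block before control returns to normal transaction processing.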
         for (const auto& trx : scheduled_trxs) {
-            if (block_time <= fc::time_point::now()) exhausted = true;
+            if (scheduled_trx_deadline <= fc::time_point::now()) exhausted = true;
             if (exhausted) {
                break;
             }
@@ -1243,6 +1268,8 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool
 
             // configurable ratio of incoming txns vs deferred txns
             while (_incoming_trx_weight >= 1.0 && orig_pending_txn_size && _pending_incoming_transactions.size()) {
+               if (scheduled_trx_deadline <= fc::time_point::now()) break;
+
                auto e = _pending_incoming_transactions.front();
                _pending_incoming_transactions.pop_front();
                --orig_pending_txn_size;
@@ -1250,7 +1277,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool
                on_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e));
             }
 
-            if (block_time <= fc::time_point::now()) {
+            if (scheduled_trx_deadline <= fc::time_point::now()) {
                exhausted = true;
                break;
             }
@@ -1262,9 +1289,9 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool
             try {
                auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms);
                bool deadline_is_subjective = false;
-               if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && block_time < deadline)) {
+               if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && scheduled_trx_deadline < deadline)) {
                   deadline_is_subjective = true;
-                  deadline = block_time;
+                  deadline = scheduled_trx_deadline;
                }
 
                auto trace = chain.push_scheduled_transaction(trx, deadline);
@@ -1298,7 +1325,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool
             }
          }
 
-      if (exhausted || block_time <= fc::time_point::now()) {
+      if (exhausted || preprocess_deadline <= fc::time_point::now()) {
          return start_block_result::exhausted;
       } else {
         // attempt to apply any pending incoming transactions
@@ -1311,7 +1338,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool
            _pending_incoming_transactions.pop_front();
            --orig_pending_txn_size;
            on_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e));
-            if (block_time <= fc::time_point::now()) return start_block_result::exhausted;
+            if (preprocess_deadline <= fc::time_point::now()) return start_block_result::exhausted;
         }
      }
      return start_block_result::succeeded;
@@ -1332,8 +1359,7 @@ void producer_plugin_impl::schedule_production_loop() {
    _timer.cancel();
    std::weak_ptr<producer_plugin_impl> weak_this = shared_from_this();
 
-   bool last_block;
-   auto result = start_block(last_block);
+   auto result = start_block();
 
    if (result == start_block_result::failed) {
      elog("Failed to start a pending block, will try again later");
@@ -1359,11 +1385,12 @@
 
      // we succeeded but block may be exhausted
     static const boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
-      if (result == start_block_result::succeeded) {
+      auto deadline = calculate_block_deadline(chain.pending_block_time());
+
+      if (deadline > fc::time_point::now()) {
        // ship this block off no later than its deadline
        EOS_ASSERT( chain.pending_block_state(), missing_pending_block_state, "producing without pending_block_state, start_block succeeded" );
-         auto deadline = chain.pending_block_time().time_since_epoch().count() + (last_block ? _last_block_time_offset_us : _produce_time_offset_us);
-         _timer.expires_at( epoch + boost::posix_time::microseconds( deadline ));
+         _timer.expires_at( epoch + boost::posix_time::microseconds( deadline.time_since_epoch().count() ));
        fc_dlog(_log, "Scheduling Block Production on Normal Block #${num} for ${time}", ("num", chain.pending_block_state()->block_num)("time",deadline));
      } else {
        EOS_ASSERT( chain.pending_block_state(), missing_pending_block_state, "producing without pending_block_state" );
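With last_block gone from the interface, the timer is armed straight from calculate_block_deadline's absolute time: the expiry is just the deadline's microsecond count since the Unix epoch reattached to the timer's epoch. The guard also replaces the result == succeeded check, so an already-expired deadline takes the else branch rather than scheduling a wakeup in the past. A minimal std::chrono sketch of that conversion (the patch uses boost::posix_time, which plays the same role):

#include <chrono>
#include <cstdint>
#include <iostream>

// fc::time_point::time_since_epoch().count() yields microseconds since the
// Unix epoch; a timer expiry is that count reattached to a clock's epoch.
using expiry_point = std::chrono::time_point<std::chrono::system_clock,
                                             std::chrono::microseconds>;

expiry_point to_timer_expiry(int64_t deadline_us_since_epoch) {
   return expiry_point(std::chrono::microseconds(deadline_us_since_epoch));
}

int main() {
   auto expiry = to_timer_expiry(1'547'000'000'000'000); // an arbitrary 2019 instant
   std::cout << expiry.time_since_epoch().count() << " us since epoch\n";
}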
From 7020da9fc454520ec4472b60187f0e36161e488c Mon Sep 17 00:00:00 2001
From: thaipandada
Date: Sun, 13 Jan 2019 21:10:42 +0800
Subject: [PATCH 19/21] modify the location type

---
 programs/cleos/main.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp
index b8d5a557aac..ddcb8c11728 100644
--- a/programs/cleos/main.cpp
+++ b/programs/cleos/main.cpp
@@ -557,7 +557,7 @@ chain::action create_delegate(const name& from, const name& receiver, const asse
                         config::system_account_name, N(delegatebw), act_payload);
 }
 
-fc::variant regproducer_variant(const account_name& producer, const public_key_type& key, const string& url, uint16_t location) {
+fc::variant regproducer_variant(const account_name& producer, const public_key_type& key, const string& url, int16_t location) {
    FC_ASSERT(location>-12&&location<=12,"time zone setting is not legal");
    location=location>=0?location:24+location;
    return fc::mutable_variant_object()
@@ -944,7 +944,7 @@ struct register_producer_subcommand {
    string producer_str;
    string producer_key_str;
    string url;
-   uint16_t loc = 0;
+   int16_t loc = 0;
 
    register_producer_subcommand(CLI::App* actionRoot) {
       auto register_producer = actionRoot->add_subcommand("regproducer", localized("Register a new producer"));

From 307dbf141b9627a01fd1758b033ede166212a83f Mon Sep 17 00:00:00 2001
From: thaipandada
Date: Mon, 14 Jan 2019 18:40:16 +0800
Subject: [PATCH 20/21] fix cmake error

---
 CMakeLists.txt    | 5 +++++
 Docker/Dockerfile | 2 +-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index c92f5276c17..2303eee93b3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -37,6 +37,11 @@ set(VERSION_MAJOR 2)
 set(VERSION_MINOR 0)
 set(VERSION_PATCH 1)
 
+if(VERSION_SUFFIX)
+   set(VERSION_FULL "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}-${VERSION_SUFFIX}")
+else()
+   set(VERSION_FULL "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}")
+endif()
 
 set( CLI_CLIENT_EXECUTABLE_NAME cleos )
 set( NODE_EXECUTABLE_NAME nodeos )

diff --git a/Docker/Dockerfile b/Docker/Dockerfile
index 9dfb88fd632..81b09c4171a 100644
--- a/Docker/Dockerfile
+++ b/Docker/Dockerfile
@@ -10,7 +10,7 @@ RUN git clone -b $branch https://github.com/boscore/bos.git --recursive \
     && cmake -H. -B"/tmp/build" -GNinja -DCMAKE_BUILD_TYPE=Release -DWASM_ROOT=/opt/wasm -DCMAKE_CXX_COMPILER=clang++ \
     -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/tmp/build -DBUILD_MONGO_DB_PLUGIN=true -DCORE_SYMBOL_NAME=$symbol \
     -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" \
-    && cmake --build /tmp/build --target install && rm /tmp/build/bin/eosiocpp
+    && cmake --build /tmp/build --target install
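The uint16_t-to-int16_t change in PATCH 19 matters because location encodes a signed UTC offset: negative time zones have to survive parsing and the (-12, 12] range check before being folded into [0, 24) for the on-chain field. A self-contained sketch of that validation and fold (normalize_location is an illustrative name, not from the patch; the real code uses FC_ASSERT rather than assert):

#include <cassert>
#include <cstdint>
#include <iostream>

// Accept a UTC offset in (-12, 12], then fold negative offsets into [0, 24).
// A signed type is essential: with an unsigned parameter, a negative offset
// could never reach this function intact.
int16_t normalize_location(int16_t location) {
   assert(location > -12 && location <= 12 && "time zone setting is not legal");
   return location >= 0 ? location : static_cast<int16_t>(24 + location);
}

int main() {
   std::cout << normalize_location(8)  << "\n"; // 8  (UTC+8 stays as-is)
   std::cout << normalize_location(-5) << "\n"; // 19 (UTC-5 folds to 24 - 5)
}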
From f84de85554948999a8662f6fcc137bfb3add9c6c Mon Sep 17 00:00:00 2001
From: thaipandada
Date: Tue, 15 Jan 2019 13:49:10 +0800
Subject: [PATCH 21/21] fix argument name error

---
 programs/cleos/main.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp
index 6c55ab03bcf..ddcb8c11728 100644
--- a/programs/cleos/main.cpp
+++ b/programs/cleos/main.cpp
@@ -564,7 +564,7 @@ fc::variant regproducer_variant(const account_name& producer, const public_key_t
          ("producer", producer)
          ("producer_key", key)
          ("url", url)
-         ("location", _location)
+         ("location", location)
          ;
 }
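With the argument name corrected, the location value supplied by the caller actually reaches the action payload. A usage sketch of the same fc::mutable_variant_object chaining, assuming the fc library headers are available and using illustrative values (in cleos the producer is an account_name and the key a public_key_type, not raw strings):

#include <fc/io/json.hpp>
#include <fc/variant_object.hpp>
#include <iostream>

int main() {
   // Mirrors the payload regproducer_variant builds; values are illustrative.
   fc::variant payload = fc::mutable_variant_object()
      ("producer", "produceracct")
      ("producer_key", "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV")
      ("url", "https://producer.example.com")
      ("location", 19); // a UTC-5 producer, after the fold to [0, 24)
   std::cout << fc::json::to_string(payload) << std::endl;
}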