From 094164992093d4898ad87ec1d8c00df24f175b60 Mon Sep 17 00:00:00 2001 From: Eugene Chung Date: Thu, 16 Aug 2018 17:44:00 +0900 Subject: [PATCH 001/194] use config::producers_account_name instead of N(eosio.prods) --- unittests/multisig_tests.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unittests/multisig_tests.cpp b/unittests/multisig_tests.cpp index debf5d95f7c..80e5433e71f 100644 --- a/unittests/multisig_tests.cpp +++ b/unittests/multisig_tests.cpp @@ -396,7 +396,7 @@ BOOST_FIXTURE_TEST_CASE( update_system_contract_all_approve, eosio_msig_tester ) set_authority(config::system_account_name, "active", authority(1, vector{{get_private_key("eosio", "active").get_public_key(), 1}}, - vector{{{N(eosio.prods), config::active_name}, 1}}), "owner", + vector{{{config::producers_account_name, config::active_name}, 1}}), "owner", { { config::system_account_name, "active" } }, { get_private_key( config::system_account_name, "active" ) }); set_producers( {N(alice),N(bob),N(carol)} ); @@ -507,7 +507,7 @@ BOOST_FIXTURE_TEST_CASE( update_system_contract_major_approve, eosio_msig_tester // set up the link between (eosio active) and (eosio.prods active) set_authority(config::system_account_name, "active", authority(1, vector{{get_private_key("eosio", "active").get_public_key(), 1}}, - vector{{{N(eosio.prods), config::active_name}, 1}}), "owner", + vector{{{config::producers_account_name, config::active_name}, 1}}), "owner", { { config::system_account_name, "active" } }, { get_private_key( config::system_account_name, "active" ) }); create_accounts( { N(apple) } ); From 3d0135eb11168fd461ff11eedd490d1adb504076 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 17 Aug 2018 13:41:36 -0400 Subject: [PATCH 002/194] Update fc submodule --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index a6b2756b100..c058a8630e8 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 
a6b2756b100098296f7548a191e6210e770b7b3a +Subproject commit c058a8630e8ccb23babf6f6489cffb48e3510c49 From 94bb2ca08b03587ad28fa199568f7be93805d5b8 Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Wed, 22 Aug 2018 10:48:34 -0400 Subject: [PATCH 003/194] get_raw_abi --- plugins/chain_api_plugin/chain_api_plugin.cpp | 1 + plugins/chain_plugin/chain_plugin.cpp | 13 +++++++++++++ .../include/eosio/chain_plugin/chain_plugin.hpp | 13 +++++++++++++ 3 files changed, 27 insertions(+) diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp index 58098501f04..2456e15c340 100644 --- a/plugins/chain_api_plugin/chain_api_plugin.cpp +++ b/plugins/chain_api_plugin/chain_api_plugin.cpp @@ -87,6 +87,7 @@ void chain_api_plugin::plugin_startup() { CHAIN_RO_CALL(get_code, 200), CHAIN_RO_CALL(get_abi, 200), CHAIN_RO_CALL(get_raw_code_and_abi, 200), + CHAIN_RO_CALL(get_raw_abi, 200), CHAIN_RO_CALL(get_table_rows, 200), CHAIN_RO_CALL(get_currency_balance, 200), CHAIN_RO_CALL(get_currency_stats, 200), diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 11b62273808..fd153b3f3e9 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1502,6 +1502,19 @@ read_only::get_raw_code_and_abi_results read_only::get_raw_code_and_abi( const g return result; } +read_only::get_raw_abi_results read_only::get_raw_abi( const get_raw_abi_params& params )const { + get_raw_abi_results result; + result.account_name = params.account_name; + + const auto& d = db.db(); + const auto& accnt = d.get(params.account_name); + // todo: fetch hash from table in eosio account + result.code_hash = fc::sha256::hash( accnt.code.data(), accnt.code.size() ); + result.abi = blob{{accnt.abi.begin(), accnt.abi.end()}}; + + return result; +} + read_only::get_account_results read_only::get_account( const get_account_params& params )const { get_account_results result; result.account_name = 
params.account_name; diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 2fd665d6255..0945825596f 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -171,10 +171,21 @@ class read_only { name account_name; }; + struct get_raw_abi_params { + name account_name; + }; + + struct get_raw_abi_results { + name account_name; + fc::sha256 code_hash; + chain::blob abi; + }; + get_code_results get_code( const get_code_params& params )const; get_abi_results get_abi( const get_abi_params& params )const; get_raw_code_and_abi_results get_raw_code_and_abi( const get_raw_code_and_abi_params& params)const; + get_raw_abi_results get_raw_abi( const get_raw_abi_params& params)const; @@ -648,6 +659,8 @@ FC_REFLECT( eosio::chain_apis::read_only::get_code_params, (account_name)(code_a FC_REFLECT( eosio::chain_apis::read_only::get_abi_params, (account_name) ) FC_REFLECT( eosio::chain_apis::read_only::get_raw_code_and_abi_params, (account_name) ) FC_REFLECT( eosio::chain_apis::read_only::get_raw_code_and_abi_results, (account_name)(wasm)(abi) ) +FC_REFLECT( eosio::chain_apis::read_only::get_raw_abi_params, (account_name) ) +FC_REFLECT( eosio::chain_apis::read_only::get_raw_abi_results, (account_name)(code_hash)(abi) ) FC_REFLECT( eosio::chain_apis::read_only::producer_info, (producer_name) ) FC_REFLECT( eosio::chain_apis::read_only::abi_json_to_bin_params, (code)(action)(args) ) FC_REFLECT( eosio::chain_apis::read_only::abi_json_to_bin_result, (binargs) ) From 300499b157224f43b0abcd655a41e0ee35849351 Mon Sep 17 00:00:00 2001 From: Aaron Cox Date: Wed, 22 Aug 2018 22:24:39 -0400 Subject: [PATCH 004/194] Fixed framework casing for case sensitive MacOS builds Fixes #5302, same issue as #2719 --- plugins/wallet_plugin/CMakeLists.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff 
--git a/plugins/wallet_plugin/CMakeLists.txt b/plugins/wallet_plugin/CMakeLists.txt index fc27ea76e7a..8b3a6d7d7b1 100644 --- a/plugins/wallet_plugin/CMakeLists.txt +++ b/plugins/wallet_plugin/CMakeLists.txt @@ -4,10 +4,10 @@ if(APPLE) set(SE_WALLET_SOURCES se_wallet.cpp macos_user_auth.m) set_source_files_properties(macos_user_presence.m PROPERTIES COMPILE_FLAGS "-x objective-c") - find_library(security_framework security) - find_library(localauthentication_framework localauthentication) - find_library(corefoundation_framework corefoundation) - find_library(cocoa_framework cocoa) + find_library(security_framework Security) + find_library(localauthentication_framework LocalAuthentication) + find_library(corefoundation_framework CoreFoundation) + find_library(cocoa_framework Cocoa) if(MAS_KEYCHAIN_GROUP) add_definitions(-DMAS_KEYCHAIN_GROUP=${MAS_KEYCHAIN_GROUP}) From 6ebe1d14577656850413b3175cf93d35e20ad0f5 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 23 Aug 2018 13:42:23 -0500 Subject: [PATCH 005/194] Update fc --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 62a19a75868..b9d51de0dc0 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 62a19a758682679e3de27d956986eaf8b016465d +Subproject commit b9d51de0dc09ad5e48ef3a6179ec579b351ae6cc From 60f5e3d51d9e6eb01d93e27645550cd4aa7feac5 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 24 Aug 2018 13:06:43 -0400 Subject: [PATCH 006/194] Add a new WASM backend: wabt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a new WASM backend using wabt’s interpreter. This interpreter is considerably faster than the current default of binaryen — a replay on the mainnet typically completes in less than half the time compared to binaryen. 
--- .gitmodules | 3 + libraries/CMakeLists.txt | 5 + libraries/chain/CMakeLists.txt | 5 +- .../include/eosio/chain/wasm_interface.hpp | 3 +- .../eosio/chain/wasm_interface_private.hpp | 6 +- .../include/eosio/chain/webassembly/wabt.hpp | 713 ++++++++++++++++++ libraries/chain/wasm_interface.cpp | 2 + libraries/chain/webassembly/wabt.cpp | 98 +++ .../testing/include/eosio/testing/tester.hpp | 2 + libraries/testing/tester.cpp | 2 + libraries/wabt | 1 + unittests/CMakeLists.txt | 3 + unittests/whitelist_blacklist_tests.cpp | 2 + 13 files changed, 842 insertions(+), 3 deletions(-) create mode 100644 libraries/chain/include/eosio/chain/webassembly/wabt.hpp create mode 100644 libraries/chain/webassembly/wabt.cpp create mode 160000 libraries/wabt diff --git a/.gitmodules b/.gitmodules index 16559d89417..7d0f8a37f7b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -27,3 +27,6 @@ [submodule "libraries/fc"] path = libraries/fc url = https://github.com/EOSIO/fc +[submodule "libraries/wabt"] + path = libraries/wabt + url = http://github.com/EOSIO/wabt diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index a36e79cf5d2..b67c86b0ed8 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -8,3 +8,8 @@ add_subdirectory( appbase ) add_subdirectory( chain ) add_subdirectory( testing ) add_subdirectory( abi_generator ) + +#turn these off for now +set(BUILD_TESTS OFF CACHE BOOL "Build GTest-based tests") +set(RUN_RE2C OFF CACHE BOOL "Run re2c") +add_subdirectory( wabt ) diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index af540a38eca..d23329548a4 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -38,6 +38,7 @@ add_library( eosio_chain webassembly/wavm.cpp webassembly/binaryen.cpp + webassembly/wabt.cpp # get_config.cpp # global_property_object.cpp @@ -50,12 +51,14 @@ add_library( eosio_chain ) target_link_libraries( eosio_chain eos_utilities fc chainbase Logging IR WAST WASM Runtime - wasm 
asmjs passes cfg ast emscripten-optimizer support softfloat builtins + wasm asmjs passes cfg ast emscripten-optimizer support softfloat builtins libwabt ) target_include_directories( eosio_chain PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/../wasm-jit/Include" "${CMAKE_CURRENT_SOURCE_DIR}/../../externals/binaryen/src" + "${CMAKE_SOURCE_DIR}/libraries/wabt" + "${CMAKE_BINARY_DIR}/libraries/wabt" ) install( TARGETS eosio_chain diff --git a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp index 82bd93233e6..17ac03fddfe 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp @@ -54,6 +54,7 @@ namespace eosio { namespace chain { enum class vm_type { wavm, binaryen, + wabt }; wasm_interface(vm_type vm); @@ -76,4 +77,4 @@ namespace eosio{ namespace chain { std::istream& operator>>(std::istream& in, wasm_interface::vm_type& runtime); }} -FC_REFLECT_ENUM( eosio::chain::wasm_interface::vm_type, (wavm)(binaryen) ) +FC_REFLECT_ENUM( eosio::chain::wasm_interface::vm_type, (wavm)(binaryen)(wabt) ) diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp index 496cf3b435b..df28d79a21b 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -28,6 +29,8 @@ namespace eosio { namespace chain { runtime_interface = std::make_unique(); else if(vm == wasm_interface::vm_type::binaryen) runtime_interface = std::make_unique(); + else if(vm == wasm_interface::vm_type::wabt) + runtime_interface = std::make_unique(); else EOS_THROW(wasm_exception, "wasm_interface_impl fall through"); } @@ -95,7 +98,8 @@ namespace eosio { namespace chain { 
#define _REGISTER_INTRINSIC_EXPLICIT(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ _REGISTER_WAVM_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ - _REGISTER_BINARYEN_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) + _REGISTER_BINARYEN_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ + _REGISTER_WABT_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) #define _REGISTER_INTRINSIC4(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ _REGISTER_INTRINSIC_EXPLICIT(CLS, MOD, METHOD, WASM_SIG, NAME, SIG ) diff --git a/libraries/chain/include/eosio/chain/webassembly/wabt.hpp b/libraries/chain/include/eosio/chain/webassembly/wabt.hpp new file mode 100644 index 00000000000..14b49736ac7 --- /dev/null +++ b/libraries/chain/include/eosio/chain/webassembly/wabt.hpp @@ -0,0 +1,713 @@ +#pragma once + +#include +#include +#include +#include +#include + +//wabt includes +#include +#include +#include +#include + +namespace eosio { namespace chain { namespace webassembly { namespace wabt_runtime { + +using namespace fc; +using namespace wabt; +using namespace wabt::interp; +using namespace eosio::chain::webassembly::common; + +struct wabt_apply_instance_vars { + Memory* memory; + apply_context& ctx; + + char* get_validated_pointer(uint32_t offset, uint32_t size) { + EOS_ASSERT(memory, wasm_execution_error, "access violation"); + EOS_ASSERT(offset + size <= memory->data.size() && offset + size >= offset, wasm_execution_error, "access violation"); + return memory->data.data() + offset; + } +}; + +struct intrinsic_registrator { + using intrinsic_fn = TypedValue(*)(wabt_apply_instance_vars&, const TypedValues&); + + struct intrinsic_func_info { + FuncSignature sig; + intrinsic_fn func; + }; + + static auto& get_map(){ + static map> _map; + return _map; + }; + + intrinsic_registrator(const char* mod, const char* name, const FuncSignature& sig, intrinsic_fn fn) { + get_map()[string(mod)][string(name)] = intrinsic_func_info{sig, fn}; + } +}; + +class wabt_runtime : public eosio::chain::wasm_runtime_interface 
{ + public: + wabt_runtime(); + std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory) override; + + private: + wabt::ReadBinaryOptions read_binary_options; //note default ctor will look at each option in feature.def and default to DISABLED for the feature +}; + +/** + * class to represent an in-wasm-memory array + * it is a hint to the transcriber that the next parameter will + * be a size (data bytes length) and that the pair are validated together + * This triggers the template specialization of intrinsic_invoker_impl + * @tparam T + */ +template +inline array_ptr array_ptr_impl (wabt_apply_instance_vars& vars, uint32_t ptr, uint32_t length) +{ + EOS_ASSERT( length < INT_MAX/(uint32_t)sizeof(T), binaryen_exception, "length will overflow" ); + return array_ptr((T*)(vars.get_validated_pointer(ptr, length * (uint32_t)sizeof(T)))); +} + +/** + * class to represent an in-wasm-memory char array that must be null terminated + */ +inline null_terminated_ptr null_terminated_ptr_impl(wabt_apply_instance_vars& vars, uint32_t ptr) +{ + char *value = vars.get_validated_pointer(ptr, 1); + const char* p = value; + const char* const top_of_memory = vars.memory->data.data() + vars.memory->data.size(); + while(p < top_of_memory) + if(*p++ == '\0') + return null_terminated_ptr(value); + + FC_THROW_EXCEPTION(wasm_execution_error, "unterminated string"); +} + + +template +struct is_reference_from_value { + static constexpr bool value = false; +}; + +template<> +struct is_reference_from_value { + static constexpr bool value = true; +}; + +template<> +struct is_reference_from_value { + static constexpr bool value = true; +}; + +template +constexpr bool is_reference_from_value_v = is_reference_from_value::value; + +template +T convert_literal_to_native(const TypedValue& v); + +template<> +inline double convert_literal_to_native(const TypedValue& v) { + return v.get_f64(); +} + +template<> +inline float convert_literal_to_native(const 
TypedValue& v) { + return v.get_f32(); +} + +template<> +inline int64_t convert_literal_to_native(const TypedValue& v) { + return v.get_i64(); +} + +template<> +inline uint64_t convert_literal_to_native(const TypedValue& v) { + return v.get_i64(); +} + +template<> +inline int32_t convert_literal_to_native(const TypedValue& v) { + return v.get_i32(); +} + +template<> +inline uint32_t convert_literal_to_native(const TypedValue& v) { + return v.get_i32(); +} + +template<> +inline bool convert_literal_to_native(const TypedValue& v) { + return v.get_i32(); +} + +template<> +inline name convert_literal_to_native(const TypedValue& v) { + int64_t val = v.get_i64(); + return name(val); +} + +inline auto convert_native_to_literal(const wabt_apply_instance_vars&, const uint32_t &val) { + TypedValue tv(Type::I32); + tv.set_i32(val); + return tv; +} + +inline auto convert_native_to_literal(const wabt_apply_instance_vars&, const int32_t &val) { + TypedValue tv(Type::I32); + tv.set_i32(val); + return tv; +} + +inline auto convert_native_to_literal(const wabt_apply_instance_vars&, const uint64_t &val) { + TypedValue tv(Type::I64); + tv.set_i64(val); + return tv; +} + +inline auto convert_native_to_literal(const wabt_apply_instance_vars&, const int64_t &val) { + TypedValue tv(Type::I64); + tv.set_i64(val); + return tv; +} + +inline auto convert_native_to_literal(const wabt_apply_instance_vars&, const float &val) { + TypedValue tv(Type::F32); + tv.set_f32(val); + return tv; +} + +inline auto convert_native_to_literal(const wabt_apply_instance_vars&, const double &val) { + TypedValue tv(Type::F64); + tv.set_f64(val); + return tv; +} + +inline auto convert_native_to_literal(const wabt_apply_instance_vars&, const name &val) { + TypedValue tv(Type::I64); + tv.set_i64(val.value); + return tv; +} + +inline auto convert_native_to_literal(const wabt_apply_instance_vars& vars, char* ptr) { + const char* base = vars.memory->data.data(); + const char* top_of_memory = base + 
vars.memory->data.size(); + EOS_ASSERT(ptr >= base && ptr < top_of_memory, wasm_execution_error, "returning pointer not in linear memory"); + Value v; + v.i32 = (int)(ptr - base); + return TypedValue(Type::I32, v); +} + +struct void_type { +}; + +template +struct wabt_to_value_type; + +template<> +struct wabt_to_value_type { + static constexpr auto value = Type::F32; +}; + +template<> +struct wabt_to_value_type { + static constexpr auto value = Type::F64; +}; +template<> +struct wabt_to_value_type { + static constexpr auto value = Type::I32; +}; +template<> +struct wabt_to_value_type { + static constexpr auto value = Type::I64; +}; + +template +constexpr auto wabt_to_value_type_v = wabt_to_value_type::value; + +template +struct wabt_to_rvalue_type; +template<> +struct wabt_to_rvalue_type { + static constexpr auto value = Type::F32; +}; +template<> +struct wabt_to_rvalue_type { + static constexpr auto value = Type::F64; +}; +template<> +struct wabt_to_rvalue_type { + static constexpr auto value = Type::I32; +}; +template<> +struct wabt_to_rvalue_type { + static constexpr auto value = Type::I64; +}; +template<> +struct wabt_to_rvalue_type { + static constexpr auto value = Type::I64; +}; +template<> +struct wabt_to_rvalue_type { + static constexpr auto value = Type::I64; +}; + +template<> +struct wabt_to_rvalue_type { + static constexpr auto value = Type::I32; +}; + +template +constexpr auto wabt_to_rvalue_type_v = wabt_to_rvalue_type::value; + +template +struct wabt_function_type_provider; + +template +struct wabt_function_type_provider { + static FuncSignature type() { + return FuncSignature({wabt_to_value_type_v ...}, {wabt_to_rvalue_type_v}); + } +}; +template +struct wabt_function_type_provider { + static FuncSignature type() { + return FuncSignature({wabt_to_value_type_v ...}, {}); + } +}; + +/** + * Forward declaration of the invoker type which transcribes arguments to/from a native method + * and injects the appropriate checks + * + * @tparam Ret - the return 
type of the native function + * @tparam NativeParameters - a std::tuple of the remaining native parameters to transcribe + * @tparam WasmParameters - a std::tuple of the transribed parameters + */ +template +struct intrinsic_invoker_impl; + +/** + * Specialization for the fully transcribed signature + * @tparam Ret - the return type of the native function + */ +template +struct intrinsic_invoker_impl> { + using next_method_type = Ret (*)(wabt_apply_instance_vars&, const TypedValues&, int); + + template + static TypedValue invoke(wabt_apply_instance_vars& vars, const TypedValues& args) { + return convert_native_to_literal(vars, Method(vars, args, args.size() - 1)); + } + + template + static const auto fn() { + return invoke; + } +}; + +/** + * specialization of the fully transcribed signature for void return values + * @tparam Translated - the arguments to the wasm function + */ +template<> +struct intrinsic_invoker_impl> { + using next_method_type = void_type (*)(wabt_apply_instance_vars&, const TypedValues&, int); + + template + static TypedValue invoke(wabt_apply_instance_vars& vars, const TypedValues& args) { + Method(vars, args, args.size() - 1); + return TypedValue(Type::Void); + } + + template + static const auto fn() { + return invoke; + } +}; + +/** + * Sepcialization for transcribing a simple type in the native method signature + * @tparam Ret - the return type of the native method + * @tparam Input - the type of the native parameter to transcribe + * @tparam Inputs - the remaining native parameters to transcribe + */ +template +struct intrinsic_invoker_impl> { + using next_step = intrinsic_invoker_impl>; + using then_type = Ret (*)(wabt_apply_instance_vars&, Input, Inputs..., const TypedValues&, int); + + template + static Ret translate_one(wabt_apply_instance_vars& vars, Inputs... 
rest, const TypedValues& args, int offset) { + auto& last = args.at(offset); + auto native = convert_literal_to_native(last); + return Then(vars, native, rest..., args, (uint32_t)offset - 1); + }; + + template + static const auto fn() { + return next_step::template fn>(); + } +}; + +/** + * Specialization for transcribing a array_ptr type in the native method signature + * This type transcribes into 2 wasm parameters: a pointer and byte length and checks the validity of that memory + * range before dispatching to the native method + * + * @tparam Ret - the return type of the native method + * @tparam Inputs - the remaining native parameters to transcribe + */ +template +struct intrinsic_invoker_impl, size_t, Inputs...>> { + using next_step = intrinsic_invoker_impl>; + using then_type = Ret(*)(wabt_apply_instance_vars&, array_ptr, size_t, Inputs..., const TypedValues&, int); + + template + static auto translate_one(wabt_apply_instance_vars& vars, Inputs... rest, const TypedValues& args, int offset) -> std::enable_if_t::value, Ret> { + static_assert(!std::is_pointer::value, "Currently don't support array of pointers"); + uint32_t ptr = args.at((uint32_t)offset - 1).get_i32(); + size_t length = args.at((uint32_t)offset).get_i32(); + T* base = array_ptr_impl(vars, ptr, length); + if ( reinterpret_cast(base) % alignof(T) != 0 ) { + wlog( "misaligned array of const values" ); + std::vector > copy(length > 0 ? length : 1); + T* copy_ptr = ©[0]; + memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) ); + return Then(vars, static_cast>(copy_ptr), length, rest..., args, (uint32_t)offset - 2); + } + return Then(vars, static_cast>(base), length, rest..., args, (uint32_t)offset - 2); + }; + + template + static auto translate_one(wabt_apply_instance_vars& vars, Inputs... 
rest, const TypedValues& args, int offset) -> std::enable_if_t::value, Ret> { + static_assert(!std::is_pointer::value, "Currently don't support array of pointers"); + uint32_t ptr = args.at((uint32_t)offset - 1).get_i32(); + size_t length = args.at((uint32_t)offset).get_i32(); + T* base = array_ptr_impl(vars, ptr, length); + if ( reinterpret_cast(base) % alignof(T) != 0 ) { + wlog( "misaligned array of values" ); + std::vector > copy(length > 0 ? length : 1); + T* copy_ptr = ©[0]; + memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) ); + Ret ret = Then(vars, static_cast>(copy_ptr), length, rest..., args, (uint32_t)offset - 2); + memcpy( (void*)base, (void*)copy_ptr, length * sizeof(T) ); + return ret; + } + return Then(vars, static_cast>(base), length, rest..., args, (uint32_t)offset - 2); + }; + + template + static const auto fn() { + return next_step::template fn>(); + } +}; + +/** + * Specialization for transcribing a null_terminated_ptr type in the native method signature + * This type transcribes 1 wasm parameters: a char pointer which is validated to contain + * a null value before the end of the allocated memory. + * + * @tparam Ret - the return type of the native method + * @tparam Inputs - the remaining native parameters to transcribe + */ +template +struct intrinsic_invoker_impl> { + using next_step = intrinsic_invoker_impl>; + using then_type = Ret(*)(wabt_apply_instance_vars&, null_terminated_ptr, Inputs..., const TypedValues&, int); + + template + static Ret translate_one(wabt_apply_instance_vars& vars, Inputs... 
rest, const TypedValues& args, int offset) { + uint32_t ptr = args.at((uint32_t)offset).get_i32(); + return Then(vars, null_terminated_ptr_impl(vars, ptr), rest..., args, (uint32_t)offset - 1); + }; + + template + static const auto fn() { + return next_step::template fn>(); + } +}; + +/** + * Specialization for transcribing a pair of array_ptr types in the native method signature that share size + * This type transcribes into 3 wasm parameters: 2 pointers and byte length and checks the validity of those memory + * ranges before dispatching to the native method + * + * @tparam Ret - the return type of the native method + * @tparam Inputs - the remaining native parameters to transcribe + */ +template +struct intrinsic_invoker_impl, array_ptr, size_t, Inputs...>> { + using next_step = intrinsic_invoker_impl>; + using then_type = Ret(*)(wabt_apply_instance_vars&, array_ptr, array_ptr, size_t, Inputs..., const TypedValues&, int); + + template + static Ret translate_one(wabt_apply_instance_vars& vars, Inputs... 
rest, const TypedValues& args, int offset) { + uint32_t ptr_t = args.at((uint32_t)offset - 2).get_i32(); + uint32_t ptr_u = args.at((uint32_t)offset - 1).get_i32(); + size_t length = args.at((uint32_t)offset).get_i32(); + static_assert(std::is_same, char>::value && std::is_same, char>::value, "Currently only support array of (const)chars"); + return Then(vars, array_ptr_impl(vars, ptr_t, length), array_ptr_impl(vars, ptr_u, length), length, args, (uint32_t)offset - 3); + }; + + template + static const auto fn() { + return next_step::template fn>(); + } +}; + +/** + * Specialization for transcribing memset parameters + * + * @tparam Ret - the return type of the native method + * @tparam Inputs - the remaining native parameters to transcribe + */ +template +struct intrinsic_invoker_impl, int, size_t>> { + using next_step = intrinsic_invoker_impl>; + using then_type = Ret(*)(wabt_apply_instance_vars&, array_ptr, int, size_t, const TypedValues&, int); + + template + static Ret translate_one(wabt_apply_instance_vars& vars, const TypedValues& args, int offset) { + uint32_t ptr = args.at((uint32_t)offset - 2).get_i32(); + uint32_t value = args.at((uint32_t)offset - 1).get_i32(); + size_t length = args.at((uint32_t)offset).get_i32(); + return Then(vars, array_ptr_impl(vars, ptr, length), value, length, args, (uint32_t)offset - 3); + }; + + template + static const auto fn() { + return next_step::template fn>(); + } +}; + +/** + * Specialization for transcribing a pointer type in the native method signature + * This type transcribes into an int32 pointer checks the validity of that memory + * range before dispatching to the native method + * + * @tparam Ret - the return type of the native method + * @tparam Inputs - the remaining native parameters to transcribe + */ +template +struct intrinsic_invoker_impl> { + using next_step = intrinsic_invoker_impl>; + using then_type = Ret (*)(wabt_apply_instance_vars&, T *, Inputs..., const TypedValues&, int); + + template + static auto 
translate_one(wabt_apply_instance_vars& vars, Inputs... rest, const TypedValues& args, int offset) -> std::enable_if_t::value, Ret> { + uint32_t ptr = args.at((uint32_t)offset).get_i32(); + T* base = array_ptr_impl(vars, ptr, 1); + if ( reinterpret_cast(base) % alignof(T) != 0 ) { + wlog( "misaligned const pointer" ); + std::remove_const_t copy; + T* copy_ptr = © + memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); + return Then(vars, copy_ptr, rest..., args, (uint32_t)offset - 1); + } + return Then(vars, base, rest..., args, (uint32_t)offset - 1); + }; + + template + static auto translate_one(wabt_apply_instance_vars& vars, Inputs... rest, const TypedValues& args, int offset) -> std::enable_if_t::value, Ret> { + uint32_t ptr = args.at((uint32_t)offset).get_i32(); + T* base = array_ptr_impl(vars, ptr, 1); + if ( reinterpret_cast(base) % alignof(T) != 0 ) { + wlog( "misaligned pointer" ); + T copy; + memcpy( (void*)©, (void*)base, sizeof(T) ); + Ret ret = Then(vars, ©, rest..., args, (uint32_t)offset - 1); + memcpy( (void*)base, (void*)©, sizeof(T) ); + return ret; + } + return Then(vars, base, rest..., args, (uint32_t)offset - 1); + }; + + template + static const auto fn() { + return next_step::template fn>(); + } +}; + +/** + * Specialization for transcribing a reference to a name which can be passed as a native value + * This type transcribes into a native type which is loaded by value into a + * variable on the stack and then passed by reference to the intrinsic. + * + * @tparam Ret - the return type of the native method + * @tparam Inputs - the remaining native parameters to transcribe + */ +template +struct intrinsic_invoker_impl> { + using next_step = intrinsic_invoker_impl>; + using then_type = Ret (*)(wabt_apply_instance_vars&, const name&, Inputs..., const TypedValues&, int); + + template + static Ret translate_one(wabt_apply_instance_vars& vars, Inputs... 
rest, const TypedValues& args, int offset) { + uint64_t wasm_value = args.at((uint32_t)offset).get_i64(); + auto value = name(wasm_value); + return Then(vars, value, rest..., args, (uint32_t)offset - 1); + } + + template + static const auto fn() { + return next_step::template fn>(); + } +}; + +/** + * Specialization for transcribing a reference to a fc::time_point_sec which can be passed as a native value + * This type transcribes into a native type which is loaded by value into a + * variable on the stack and then passed by reference to the intrinsic. + * + * @tparam Ret - the return type of the native method + * @tparam Inputs - the remaining native parameters to transcribe + */ +template +struct intrinsic_invoker_impl> { + using next_step = intrinsic_invoker_impl>; + using then_type = Ret (*)(wabt_apply_instance_vars&, const fc::time_point_sec&, Inputs..., const TypedValues&, int); + + template + static Ret translate_one(wabt_apply_instance_vars& vars, Inputs... rest, const TypedValues& args, int offset) { + uint32_t wasm_value = args.at((uint32_t)offset).get_i32(); + auto value = fc::time_point_sec(wasm_value); + return Then(vars, value, rest..., args, (uint32_t)offset - 1); + } + + template + static const auto fn() { + return next_step::template fn>(); + } +}; + + +/** + * Specialization for transcribing a reference type in the native method signature + * This type transcribes into an int32 pointer checks the validity of that memory + * range before dispatching to the native method + * + * @tparam Ret - the return type of the native method + * @tparam Inputs - the remaining native parameters to transcribe + */ +template +struct intrinsic_invoker_impl> { + using next_step = intrinsic_invoker_impl>; + using then_type = Ret (*)(wabt_apply_instance_vars&, T &, Inputs..., const TypedValues&, int); + + template + static auto translate_one(wabt_apply_instance_vars& vars, Inputs... 
rest, const TypedValues& args, int offset) -> std::enable_if_t::value, Ret> { + // references cannot be created for null pointers + uint32_t ptr = args.at((uint32_t)offset).get_i32(); + EOS_ASSERT(ptr != 0, binaryen_exception, "references cannot be created for null pointers"); + T* base = array_ptr_impl(vars, ptr, 1); + if ( reinterpret_cast(base) % alignof(T) != 0 ) { + wlog( "misaligned const reference" ); + std::remove_const_t copy; + T* copy_ptr = © + memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); + return Then(vars, *copy_ptr, rest..., args, (uint32_t)offset - 1); + } + return Then(vars, *base, rest..., args, (uint32_t)offset - 1); + } + + template + static auto translate_one(wabt_apply_instance_vars& vars, Inputs... rest, const TypedValues& args, int offset) -> std::enable_if_t::value, Ret> { + // references cannot be created for null pointers + uint32_t ptr = args.at((uint32_t)offset).get_i32(); + EOS_ASSERT(ptr != 0, binaryen_exception, "references cannot be created for null pointers"); + T* base = array_ptr_impl(vars, ptr, 1); + if ( reinterpret_cast(base) % alignof(T) != 0 ) { + wlog( "misaligned reference" ); + T copy; + memcpy( (void*)©, (void*)base, sizeof(T) ); + Ret ret = Then(vars, copy, rest..., args, (uint32_t)offset - 1); + memcpy( (void*)base, (void*)©, sizeof(T) ); + return ret; + } + return Then(vars, *base, rest..., args, (uint32_t)offset - 1); + } + + + template + static const auto fn() { + return next_step::template fn>(); + } +}; + +extern apply_context* fixme_context; + +/** + * forward declaration of a wrapper class to call methods of the class + */ +template +struct intrinsic_function_invoker { + using impl = intrinsic_invoker_impl>; + + template + static Ret wrapper(wabt_apply_instance_vars& vars, Params... 
params, const TypedValues&, int) { + class_from_wasm::value(vars.ctx).checktime(); + return (class_from_wasm::value(vars.ctx).*Method)(params...); + } + + template + static const intrinsic_registrator::intrinsic_fn fn() { + return impl::template fn>(); + } +}; + +template +struct intrinsic_function_invoker { + using impl = intrinsic_invoker_impl>; + + template + static void_type wrapper(wabt_apply_instance_vars& vars, Params... params, const TypedValues& args, int offset) { + class_from_wasm::value(vars.ctx).checktime(); + (class_from_wasm::value(vars.ctx).*Method)(params...); + return void_type(); + } + + template + static const intrinsic_registrator::intrinsic_fn fn() { + return impl::template fn>(); + } + +}; + +template +struct intrinsic_function_invoker_wrapper; + +template +struct intrinsic_function_invoker_wrapper { + using type = intrinsic_function_invoker; +}; + +template +struct intrinsic_function_invoker_wrapper { + using type = intrinsic_function_invoker; +}; + +template +struct intrinsic_function_invoker_wrapper { + using type = intrinsic_function_invoker; +}; + +template +struct intrinsic_function_invoker_wrapper { + using type = intrinsic_function_invoker; +}; + +#define __INTRINSIC_NAME(LABEL, SUFFIX) LABEL##SUFFIX +#define _INTRINSIC_NAME(LABEL, SUFFIX) __INTRINSIC_NAME(LABEL,SUFFIX) + +#define _REGISTER_WABT_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ + static eosio::chain::webassembly::wabt_runtime::intrinsic_registrator _INTRINSIC_NAME(__wabt_intrinsic_fn, __COUNTER__) (\ + MOD,\ + NAME,\ + eosio::chain::webassembly::wabt_runtime::wabt_function_type_provider::type(),\ + eosio::chain::webassembly::wabt_runtime::intrinsic_function_invoker_wrapper::type::fn<&CLS::METHOD>()\ + );\ + +} } } }// eosio::chain::webassembly::wabt_runtime diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index abe5a745766..ac580045277 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ 
-1913,6 +1913,8 @@ std::istream& operator>>(std::istream& in, wasm_interface::vm_type& runtime) { runtime = eosio::chain::wasm_interface::vm_type::wavm; else if (s == "binaryen") runtime = eosio::chain::wasm_interface::vm_type::binaryen; + else if (s == "wabt") + runtime = eosio::chain::wasm_interface::vm_type::wabt; else in.setstate(std::ios_base::failbit); return in; diff --git a/libraries/chain/webassembly/wabt.cpp b/libraries/chain/webassembly/wabt.cpp new file mode 100644 index 00000000000..7b303505aa9 --- /dev/null +++ b/libraries/chain/webassembly/wabt.cpp @@ -0,0 +1,98 @@ +#include +#include +#include + +//wabt includes +#include +#include + +namespace eosio { namespace chain { namespace webassembly { namespace wabt_runtime { + +//yep 🤮 +static wabt_apply_instance_vars* static_wabt_vars; + +using namespace wabt; +using namespace wabt::interp; +namespace wasm_constraints = eosio::chain::wasm_constraints; + +class wabt_instantiated_module : public wasm_instantiated_module_interface { + public: + wabt_instantiated_module(std::unique_ptr e, std::vector initial_mem, interp::DefinedModule* mod) : + _env(move(e)), _instatiated_module(mod), _initial_memory(initial_mem), + _executor(_env.get(), nullptr, Thread::Options(64*1024, + wasm_constraints::maximum_call_depth+2)) + { + for(Index i = 0; i < _env->GetGlobalCount(); ++i) { + if(_env->GetGlobal(i)->mutable_ == false) + continue; + _initial_globals.emplace_back(_env->GetGlobal(i), _env->GetGlobal(i)->typed_value); + } + + if(_env->GetMemoryCount()) + _initial_memory_configuration = _env->GetMemory(0)->page_limits; + } + + void apply(apply_context& context) override { + //reset mutable globals + for(const auto& mg : _initial_globals) + mg.first->typed_value = mg.second; + + wabt_apply_instance_vars this_run_vars{nullptr, context}; + static_wabt_vars = &this_run_vars; + + //reset memory to inital size & copy back in initial data + if(_env->GetMemoryCount()) { + Memory* memory = this_run_vars.memory = 
_env->GetMemory(0); + memory->page_limits = _initial_memory_configuration; + memory->data.resize(_initial_memory_configuration.initial * WABT_PAGE_SIZE); + memset(memory->data.data(), 0, memory->data.size()); + memcpy(memory->data.data(), _initial_memory.data(), _initial_memory.size()); + } + + _params[0].set_i64(uint64_t(context.receiver)); + _params[1].set_i64(uint64_t(context.act.account)); + _params[2].set_i64(uint64_t(context.act.name)); + + ExecResult res = _executor.RunStartFunction(_instatiated_module); + EOS_ASSERT( res.result == interp::Result::Ok, wasm_execution_error, "wabt start function failure (${s})", ("s", ResultToString(res.result)) ); + + res = _executor.RunExportByName(_instatiated_module, "apply", _params); + EOS_ASSERT( res.result == interp::Result::Ok, wasm_execution_error, "wabt execution failure (${s})", ("s", ResultToString(res.result)) ); + } + + private: + std::unique_ptr _env; + DefinedModule* _instatiated_module; //this is owned by the Environment + std::vector _initial_memory; + TypedValues _params{3, TypedValue(Type::I64)}; + std::vector> _initial_globals; + Limits _initial_memory_configuration; + Executor _executor; +}; + +wabt_runtime::wabt_runtime() {} + +std::unique_ptr wabt_runtime::instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory) { + std::unique_ptr env = std::make_unique(); + for(auto it = intrinsic_registrator::get_map().begin() ; it != intrinsic_registrator::get_map().end(); ++it) { + interp::HostModule* host_module = env->AppendHostModule(it->first); + for(auto itf = it->second.begin(); itf != it->second.end(); ++itf) { + host_module->AppendFuncExport(itf->first, itf->second.sig, [fn=itf->second.func](const auto* f, const auto* fs, const auto& args, auto& res) { + TypedValue ret = fn(*static_wabt_vars, args); + if(ret.type != Type::Void) + res[0] = ret; + return interp::Result::Ok; + }); + } + } + + interp::DefinedModule* instantiated_module = nullptr; + ErrorHandlerBuffer 
error_handler(Location::Type::Binary); + + wabt::Result res = ReadBinaryInterp(env.get(), code_bytes, code_size, read_binary_options, &error_handler, &instantiated_module); + EOS_ASSERT( Succeeded(res), wasm_execution_error, "Error building wabt interp: ${e}", ("e", error_handler.buffer()) ); + + return std::make_unique(std::move(env), initial_memory, instantiated_module); +} + +}}}} diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index d502adefd15..72215a6284a 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -332,6 +332,8 @@ namespace eosio { namespace testing { vcfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen; else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) vcfg.wasm_runtime = chain::wasm_interface::vm_type::wavm; + else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wabt")) + vcfg.wasm_runtime = chain::wasm_interface::vm_type::wabt; } diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index 051d598d2a6..19faea3e420 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -100,6 +100,8 @@ namespace eosio { namespace testing { cfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen; else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) cfg.wasm_runtime = chain::wasm_interface::vm_type::wavm; + else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wabt")) + cfg.wasm_runtime = chain::wasm_interface::vm_type::wabt; else cfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen; } diff --git a/libraries/wabt b/libraries/wabt new file mode 160000 index 00000000000..67381cbe17e --- /dev/null +++ b/libraries/wabt @@ -0,0 +1 @@ +Subproject commit 67381cbe17e0ef87d40f3376e99aea7fff0fa0b1 diff --git a/unittests/CMakeLists.txt 
b/unittests/CMakeLists.txt index 7442b00a69e..8b935138c2d 100644 --- a/unittests/CMakeLists.txt +++ b/unittests/CMakeLists.txt @@ -38,6 +38,9 @@ add_test(NAME unit_test_binaryen COMMAND unit_test add_test(NAME unit_test_wavm COMMAND unit_test -t \!wasm_tests/weighted_cpu_limit_tests --report_level=detailed --color_output --catch_system_errors=no -- --wavm) + add_test(NAME unit_test_wabt COMMAND unit_test + -t \!wasm_tests/weighted_cpu_limit_tests + --report_level=detailed --color_output -- --wabt) if(ENABLE_COVERAGE_TESTING) diff --git a/unittests/whitelist_blacklist_tests.cpp b/unittests/whitelist_blacklist_tests.cpp index ed359a703a7..1621e2ef916 100644 --- a/unittests/whitelist_blacklist_tests.cpp +++ b/unittests/whitelist_blacklist_tests.cpp @@ -45,6 +45,8 @@ class whitelist_blacklist_tester { cfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen; else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) cfg.wasm_runtime = chain::wasm_interface::vm_type::wavm; + else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wabt")) + cfg.wasm_runtime = chain::wasm_interface::vm_type::wabt; else cfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen; } From 568d82dcc46f108a24e7c12f803f18bc99da8f5c Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Sat, 11 Aug 2018 16:07:19 -0400 Subject: [PATCH 007/194] Add cleos support for HTTP over UNIX socket --- programs/cleos/httpc.cpp | 17 ++++++++++++++++- programs/cleos/httpc.hpp | 7 +++++-- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/programs/cleos/httpc.cpp b/programs/cleos/httpc.cpp index 343e396ef95..552863e277b 100644 --- a/programs/cleos/httpc.cpp +++ b/programs/cleos/httpc.cpp @@ -104,6 +104,13 @@ namespace eosio { namespace client { namespace http { parsed_url parse_url( const string& server_url ) { parsed_url res; + //unix socket doesn't quite follow classical "URL" rules so deal 
with it manually + if(boost::algorithm::starts_with(server_url, "unix://")) { + res.scheme = "unix"; + res.server = server_url.substr(strlen("unix://")); + return res; + } + //via rfc3986 and modified a bit to suck out the port number //Sadly this doesn't work for ipv6 addresses std::regex rgx(R"xx(^(([^:/?#]+):)?(//([^:/?#]*)(:(\d+))?)?([^?#]*)(\?([^#]*))?(#(.*))?)xx"); @@ -125,6 +132,9 @@ namespace eosio { namespace client { namespace http { } resolved_url resolve_url( const http_context& context, const parsed_url& url ) { + if(url.scheme == "unix") + return resolved_url(url); + tcp::resolver resolver(context->ios); boost::system::error_code ec; auto result = resolver.resolve(tcp::v4(), url.server, url.port, ec); @@ -207,7 +217,12 @@ namespace eosio { namespace client { namespace http { std::string re; try { - if(url.scheme == "http") { + if(url.scheme == "unix") { + boost::asio::local::stream_protocol::socket unix_socket(cp.context->ios); + unix_socket.connect(boost::asio::local::stream_protocol::endpoint(url.server)); + re = do_txrx(unix_socket, request, status_code); + } + else if(url.scheme == "http") { tcp::socket socket(cp.context->ios); do_connect(socket, url); re = do_txrx(socket, request, status_code); diff --git a/programs/cleos/httpc.hpp b/programs/cleos/httpc.hpp index 0fcd9b8490d..7443c9b0137 100644 --- a/programs/cleos/httpc.hpp +++ b/programs/cleos/httpc.hpp @@ -42,9 +42,12 @@ namespace eosio { namespace client { namespace http { { } + //used for unix domain, where resolving and ports are nonapplicable + resolved_url(const parsed_url& url) : parsed_url(url) {} + vector resolved_addresses; - uint16_t resolved_port; - bool is_loopback; + uint16_t resolved_port = 0; + bool is_loopback = false; }; resolved_url resolve_url( const http_context& context, From 66f586c3f4890be1d52c52f32948909e7c394c52 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Sun, 12 Aug 2018 11:24:12 -0400 Subject: [PATCH 008/194] 
Add unix socket capablity to http_plugin Add the ablity to run a unix socket HTTP server in http_plugin. The local_endpoint.hpp is a bit of a copy paste job from the asio endpoint in websocketpp. It may be possible to strip this down further with more investigation. unix socket server is currently disabled pending proper configuration items for it --- plugins/http_plugin/http_plugin.cpp | 82 +- .../eosio/http_plugin/local_endpoint.hpp | 790 ++++++++++++++++++ 2 files changed, 861 insertions(+), 11 deletions(-) create mode 100644 plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index 0a4984381ee..3c79a016c86 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -3,6 +3,7 @@ * @copyright defined in eos/LICENSE.txt */ #include +#include #include #include @@ -79,9 +80,42 @@ namespace eosio { static const long timeout_open_handshake = 0; }; + + struct asio_local_with_stub_log : public websocketpp::config::asio { + typedef asio_local_with_stub_log type; + typedef asio base; + + typedef base::concurrency_type concurrency_type; + + typedef base::request_type request_type; + typedef base::response_type response_type; + + typedef base::message_type message_type; + typedef base::con_msg_manager_type con_msg_manager_type; + typedef base::endpoint_msg_manager_type endpoint_msg_manager_type; + + typedef websocketpp::log::stub elog_type; + typedef websocketpp::log::stub alog_type; + + typedef base::rng_type rng_type; + + struct transport_config : public base::transport_config { + typedef type::concurrency_type concurrency_type; + typedef type::alog_type alog_type; + typedef type::elog_type elog_type; + typedef type::request_type request_type; + typedef type::response_type response_type; + typedef websocketpp::transport::asio::basic_socket::local_endpoint socket_type; + }; + + typedef websocketpp::transport::asio::local_endpoint 
transport_type; + + static const long timeout_open_handshake = 0; + }; } using websocket_server_type = websocketpp::server>; + using websocket_local_server_type = websocketpp::server; using websocket_server_tls_type = websocketpp::server>; using ssl_context_ptr = websocketpp::lib::shared_ptr; @@ -98,6 +132,7 @@ namespace eosio { size_t max_body_size; websocket_server_type server; + websocket_local_server_type local_server; optional https_listen_endpoint; string https_cert_chain; @@ -163,7 +198,7 @@ namespace eosio { } template - static void handle_exception(typename websocketpp::server>::connection_ptr con) { + static void handle_exception(typename websocketpp::server::connection_ptr con) { string err = "Internal Service error, http: "; try { con->set_status( websocketpp::http::status_code::internal_server_error ); @@ -195,18 +230,26 @@ namespace eosio { } template - void handle_http_request(typename websocketpp::server>::connection_ptr con) { - try { - bool is_secure = con->get_uri()->get_secure(); - const auto& local_endpoint = con->get_socket().lowest_layer().local_endpoint(); - auto local_socket_host_port = local_endpoint.address().to_string() + ":" + std::to_string(local_endpoint.port()); + bool allow_host(const typename T::request_type& req, typename websocketpp::server::connection_ptr con) { + bool is_secure = con->get_uri()->get_secure(); + const auto& local_endpoint = con->get_socket().lowest_layer().local_endpoint(); + auto local_socket_host_port = local_endpoint.address().to_string() + ":" + std::to_string(local_endpoint.port()); + + const auto& host_str = req.get_header("Host"); + if (host_str.empty() || !host_is_valid(host_str, local_socket_host_port, is_secure)) { + con->set_status(websocketpp::http::status_code::bad_request); + return false; + } + return true; + } + template + void handle_http_request(typename websocketpp::server::connection_ptr con) { + try { auto& req = con->get_request(); - const auto& host_str = req.get_header("Host"); - if 
(host_str.empty() || !host_is_valid(host_str, local_socket_host_port, is_secure)) { - con->set_status(websocketpp::http::status_code::bad_request); + + if(!allow_host(req, con)) return; - } if( !access_control_allow_origin.empty()) { con->append_header( "Access-Control-Allow-Origin", access_control_allow_origin ); @@ -258,7 +301,7 @@ namespace eosio { ws.set_reuse_addr(true); ws.set_max_http_body_size(max_body_size); ws.set_http_handler([&](connection_hdl hdl) { - handle_http_request(ws.get_con_from_hdl(hdl)); + handle_http_request>(ws.get_con_from_hdl(hdl)); }); } catch ( const fc::exception& e ){ elog( "http: ${e}", ("e",e.to_detail_string())); @@ -277,6 +320,11 @@ namespace eosio { }; + template<> + bool http_plugin_impl::allow_host(const detail::asio_local_with_stub_log::request_type& req, websocketpp::server::connection_ptr con) { + return true; + } + http_plugin::http_plugin():my(new http_plugin_impl()){} http_plugin::~http_plugin(){} @@ -413,6 +461,18 @@ namespace eosio { } } +//disabled until configuration items sorted out +#if 0 + my->local_server.clear_access_channels(websocketpp::log::alevel::all); + my->local_server.init_asio(&app().get_io_service()); + my->local_server.set_max_http_body_size(my->max_body_size); + my->local_server.listen(boost::asio::local::stream_protocol::endpoint("/tmp/test")); + my->local_server.set_http_handler([&](connection_hdl hdl) { + my->handle_http_request( my->local_server.get_con_from_hdl(hdl)); + }); + my->local_server.start_accept(); +#endif + if(my->https_listen_endpoint) { try { my->create_server_for_endpoint(*my->https_listen_endpoint, my->https_server); diff --git a/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp b/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp new file mode 100644 index 00000000000..c59f896c57e --- /dev/null +++ b/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp @@ -0,0 +1,790 @@ +#pragma once + +#include +#include +#include + +#include +#include + 
+#include + +#include +#include + +namespace websocketpp { +namespace transport { +namespace asio { + +namespace basic_socket { + +class local_connection : public lib::enable_shared_from_this { +public: + /// Type of this connection socket component + typedef local_connection type; + /// Type of a shared pointer to this connection socket component + typedef lib::shared_ptr ptr; + + /// Type of a pointer to the Asio io_service being used + typedef lib::asio::io_service* io_service_ptr; + /// Type of a pointer to the Asio io_service strand being used + typedef lib::shared_ptr strand_ptr; + /// Type of the ASIO socket being used + typedef lib::asio::local::stream_protocol::socket socket_type; + /// Type of a shared pointer to the socket being used. + typedef lib::shared_ptr socket_ptr; + + explicit local_connection() : m_state(UNINITIALIZED) { + } + + ptr get_shared() { + return shared_from_this(); + } + + bool is_secure() const { + return false; + } + + lib::asio::local::stream_protocol::socket & get_socket() { + return *m_socket; + } + + lib::asio::local::stream_protocol::socket & get_next_layer() { + return *m_socket; + } + + lib::asio::local::stream_protocol::socket & get_raw_socket() { + return *m_socket; + } + + std::string get_remote_endpoint(lib::error_code & ec) const { + return "fixme"; + } + + void pre_init(init_handler callback) { + if (m_state != READY) { + callback(socket::make_error_code(socket::error::invalid_state)); + return; + } + + m_state = READING; + + callback(lib::error_code()); + } + + void post_init(init_handler callback) { + callback(lib::error_code()); + } +protected: + lib::error_code init_asio (io_service_ptr service, strand_ptr, bool) + { + if (m_state != UNINITIALIZED) { + return socket::make_error_code(socket::error::invalid_state); + } + + m_socket = lib::make_shared( + lib::ref(*service)); + + m_state = READY; + + return lib::error_code(); + } + + void set_handle(connection_hdl hdl) { + m_hdl = hdl; + } + + lib::asio::error_code 
cancel_socket() { + lib::asio::error_code ec; + m_socket->cancel(ec); + return ec; + } + + void async_shutdown(socket::shutdown_handler h) { + lib::asio::error_code ec; + m_socket->shutdown(lib::asio::ip::tcp::socket::shutdown_both, ec); + h(ec); + } + + lib::error_code get_ec() const { + return lib::error_code(); + } + + template + lib::error_code translate_ec(ErrorCodeType) { + return make_error_code(transport::error::pass_through); + } + + lib::error_code translate_ec(lib::error_code ec) { + return ec; + } +private: + enum state { + UNINITIALIZED = 0, + READY = 1, + READING = 2 + }; + + socket_ptr m_socket; + state m_state; + + connection_hdl m_hdl; + socket_init_handler m_socket_init_handler; +}; + +class local_endpoint { +public: + /// The type of this endpoint socket component + typedef local_endpoint type; + + /// The type of the corresponding connection socket component + typedef local_connection socket_con_type; + /// The type of a shared pointer to the corresponding connection socket + /// component. + typedef socket_con_type::ptr socket_con_ptr; + + explicit local_endpoint() {} + + bool is_secure() const { + return false; + } +}; +} + +/// Asio based endpoint transport component +/** + * transport::asio::endpoint implements an endpoint transport component using + * Asio. 
+ */ +template +class local_endpoint : public config::socket_type { +public: + /// Type of this endpoint transport component + typedef local_endpoint type; + + /// Type of the concurrency policy + typedef typename config::concurrency_type concurrency_type; + /// Type of the socket policy + typedef typename config::socket_type socket_type; + /// Type of the error logging policy + typedef typename config::elog_type elog_type; + /// Type of the access logging policy + typedef typename config::alog_type alog_type; + + /// Type of the socket connection component + typedef typename socket_type::socket_con_type socket_con_type; + /// Type of a shared pointer to the socket connection component + typedef typename socket_con_type::ptr socket_con_ptr; + + /// Type of the connection transport component associated with this + /// endpoint transport component + typedef asio::connection transport_con_type; + /// Type of a shared pointer to the connection transport component + /// associated with this endpoint transport component + typedef typename transport_con_type::ptr transport_con_ptr; + + /// Type of a pointer to the ASIO io_service being used + typedef lib::asio::io_service * io_service_ptr; + /// Type of a shared pointer to the acceptor being used + typedef lib::shared_ptr acceptor_ptr; + /// Type of timer handle + typedef lib::shared_ptr timer_ptr; + /// Type of a shared pointer to an io_service work object + typedef lib::shared_ptr work_ptr; + + // generate and manage our own io_service + explicit local_endpoint() + : m_io_service(NULL) + , m_state(UNINITIALIZED) + { + //std::cout << "transport::asio::endpoint constructor" << std::endl; + } + + ~local_endpoint() { + if (m_acceptor && m_state == LISTENING) + ::unlink(m_acceptor->local_endpoint().path().c_str()); + + // Explicitly destroy local objects + m_acceptor.reset(); + m_work.reset(); + } + + /// transport::asio objects are moveable but not copyable or assignable. 
+ /// The following code sets this situation up based on whether or not we + /// have C++11 support or not +#ifdef _WEBSOCKETPP_DEFAULT_DELETE_FUNCTIONS_ + local_endpoint(const local_endpoint & src) = delete; + local_endpoint& operator= (const local_endpoint & rhs) = delete; +#else +private: + local_endpoint(const local_endpoint & src); + local_endpoint & operator= (const local_endpoint & rhs); +public: +#endif // _WEBSOCKETPP_DEFAULT_DELETE_FUNCTIONS_ + +#ifdef _WEBSOCKETPP_MOVE_SEMANTICS_ + local_endpoint (local_endpoint && src) + : config::socket_type(std::move(src)) + , m_tcp_pre_init_handler(src.m_tcp_pre_init_handler) + , m_tcp_post_init_handler(src.m_tcp_post_init_handler) + , m_io_service(src.m_io_service) + , m_acceptor(src.m_acceptor) + , m_elog(src.m_elog) + , m_alog(src.m_alog) + , m_state(src.m_state) + { + src.m_io_service = NULL; + src.m_acceptor = NULL; + src.m_state = UNINITIALIZED; + } + +#endif // _WEBSOCKETPP_MOVE_SEMANTICS_ + + /// Return whether or not the endpoint produces secure connections. + bool is_secure() const { + return socket_type::is_secure(); + } + + /// initialize asio transport with external io_service (exception free) + /** + * Initialize the ASIO transport policy for this endpoint using the provided + * io_service object. asio_init must be called exactly once on any endpoint + * that uses transport::asio before it can be used. + * + * @param ptr A pointer to the io_service to use for asio events + * @param ec Set to indicate what error occurred, if any. 
+ */ + void init_asio(io_service_ptr ptr, lib::error_code & ec) { + if (m_state != UNINITIALIZED) { + m_elog->write(log::elevel::library, + "asio::init_asio called from the wrong state"); + using websocketpp::error::make_error_code; + ec = make_error_code(websocketpp::error::invalid_state); + return; + } + + m_alog->write(log::alevel::devel,"asio::init_asio"); + + m_io_service = ptr; + m_acceptor = lib::make_shared( + lib::ref(*m_io_service)); + + m_state = READY; + ec = lib::error_code(); + } + + /// initialize asio transport with external io_service + /** + * Initialize the ASIO transport policy for this endpoint using the provided + * io_service object. asio_init must be called exactly once on any endpoint + * that uses transport::asio before it can be used. + * + * @param ptr A pointer to the io_service to use for asio events + */ + void init_asio(io_service_ptr ptr) { + lib::error_code ec; + init_asio(ptr,ec); + if (ec) { throw exception(ec); } + } + + /// Sets the tcp pre init handler + /** + * The tcp pre init handler is called after the raw tcp connection has been + * established but before any additional wrappers (proxy connects, TLS + * handshakes, etc) have been performed. + * + * @since 0.3.0 + * + * @param h The handler to call on tcp pre init. + */ + void set_tcp_pre_init_handler(tcp_init_handler h) { + m_tcp_pre_init_handler = h; + } + + /// Sets the tcp pre init handler (deprecated) + /** + * The tcp pre init handler is called after the raw tcp connection has been + * established but before any additional wrappers (proxy connects, TLS + * handshakes, etc) have been performed. + * + * @deprecated Use set_tcp_pre_init_handler instead + * + * @param h The handler to call on tcp pre init. 
+ */ + void set_tcp_init_handler(tcp_init_handler h) { + set_tcp_pre_init_handler(h); + } + + /// Sets the tcp post init handler + /** + * The tcp post init handler is called after the tcp connection has been + * established and all additional wrappers (proxy connects, TLS handshakes, + * etc have been performed. This is fired before any bytes are read or any + * WebSocket specific handshake logic has been performed. + * + * @since 0.3.0 + * + * @param h The handler to call on tcp post init. + */ + void set_tcp_post_init_handler(tcp_init_handler h) { + m_tcp_post_init_handler = h; + } + + /// Retrieve a reference to the endpoint's io_service + /** + * The io_service may be an internal or external one. This may be used to + * call methods of the io_service that are not explicitly wrapped by the + * endpoint. + * + * This method is only valid after the endpoint has been initialized with + * `init_asio`. No error will be returned if it isn't. + * + * @return A reference to the endpoint's io_service + */ + lib::asio::io_service & get_io_service() { + return *m_io_service; + } + + /// Set up endpoint for listening manually (exception free) + /** + * Bind the internal acceptor using the specified settings. The endpoint + * must have been initialized by calling init_asio before listening. + * + * @param ep An endpoint to read settings from + * @param ec Set to indicate what error occurred, if any. 
+ */ + void listen(lib::asio::local::stream_protocol::endpoint const & ep, lib::error_code & ec) + { + if (m_state != READY) { + m_elog->write(log::elevel::library, + "asio::listen called from the wrong state"); + using websocketpp::error::make_error_code; + ec = make_error_code(websocketpp::error::invalid_state); + return; + } + + m_alog->write(log::alevel::devel,"asio::listen"); + + lib::asio::error_code bec; + + { + boost::system::error_code test_ec; + lib::asio::local::stream_protocol::socket test_socket(get_io_service()); + test_socket.connect(ep, test_ec); + + //looks like a service is already running on that socket, probably another keosd, don't touch it + if(test_ec == boost::system::errc::success) + bec = boost::system::errc::make_error_code(boost::system::errc::address_in_use); + //socket exists but no one home, go ahead and remove it and continue on + else if(test_ec == boost::system::errc::connection_refused) + ::unlink(ep.path().c_str()); + else if(test_ec != boost::system::errc::no_such_file_or_directory) + bec = test_ec; + } + + if (!bec) { + m_acceptor->open(ep.protocol(),bec); + } + if (!bec) { + mode_t old_mask = umask(S_IXUSR|S_IXGRP|S_IRWXO); + m_acceptor->bind(ep,bec); + umask(old_mask); + } + if (!bec) { + m_acceptor->listen(boost::asio::socket_base::max_listen_connections,bec); + } + if (bec) { + if (m_acceptor->is_open()) { + m_acceptor->close(); + } + log_err(log::elevel::info,"asio listen",bec); + ec = bec;//make_error_code(error::pass_through); + } else { + m_state = LISTENING; + ec = lib::error_code(); + } + } + + /// Set up endpoint for listening manually + /** + * Bind the internal acceptor using the settings specified by the endpoint e + * + * @param ep An endpoint to read settings from + */ + void listen(lib::asio::local::stream_protocol::endpoint const & ep) { + lib::error_code ec; + listen(ep,ec); + if (ec) { throw exception(ec); } + } + + /// Stop listening (exception free) + /** + * Stop listening and accepting new connections. 
This will not end any + * existing connections. + * + * @since 0.3.0-alpha4 + * @param ec A status code indicating an error, if any. + */ + void stop_listening(lib::error_code & ec) { + if (m_state != LISTENING) { + m_elog->write(log::elevel::library, + "asio::listen called from the wrong state"); + using websocketpp::error::make_error_code; + ec = make_error_code(websocketpp::error::invalid_state); + return; + } + + ::unlink(m_acceptor->local_endpoint().path().c_str()); + m_acceptor->close(); + m_state = READY; + ec = lib::error_code(); + } + + /// Stop listening + /** + * Stop listening and accepting new connections. This will not end any + * existing connections. + * + * @since 0.3.0-alpha4 + */ + void stop_listening() { + lib::error_code ec; + stop_listening(ec); + if (ec) { throw exception(ec); } + } + + /// Check if the endpoint is listening + /** + * @return Whether or not the endpoint is listening. + */ + bool is_listening() const { + return (m_state == LISTENING); + } + + /// wraps the run method of the internal io_service object + std::size_t run() { + return m_io_service->run(); + } + + /// wraps the run_one method of the internal io_service object + /** + * @since 0.3.0-alpha4 + */ + std::size_t run_one() { + return m_io_service->run_one(); + } + + /// wraps the stop method of the internal io_service object + void stop() { + m_io_service->stop(); + } + + /// wraps the poll method of the internal io_service object + std::size_t poll() { + return m_io_service->poll(); + } + + /// wraps the poll_one method of the internal io_service object + std::size_t poll_one() { + return m_io_service->poll_one(); + } + + /// wraps the reset method of the internal io_service object + void reset() { + m_io_service->reset(); + } + + /// wraps the stopped method of the internal io_service object + bool stopped() const { + return m_io_service->stopped(); + } + + /// Marks the endpoint as perpetual, stopping it from exiting when empty + /** + * Marks the endpoint as 
perpetual. Perpetual endpoints will not + * automatically exit when they run out of connections to process. To stop + * a perpetual endpoint call `end_perpetual`. + * + * An endpoint may be marked perpetual at any time by any thread. It must be + * called either before the endpoint has run out of work or before it was + * started + * + * @since 0.3.0 + */ + void start_perpetual() { + m_work = lib::make_shared( + lib::ref(*m_io_service) + ); + } + + /// Clears the endpoint's perpetual flag, allowing it to exit when empty + /** + * Clears the endpoint's perpetual flag. This will cause the endpoint's run + * method to exit normally when it runs out of connections. If there are + * currently active connections it will not end until they are complete. + * + * @since 0.3.0 + */ + void stop_perpetual() { + m_work.reset(); + } + + /// Call back a function after a period of time. + /** + * Sets a timer that calls back a function after the specified period of + * milliseconds. Returns a handle that can be used to cancel the timer. + * A cancelled timer will return the error code error::operation_aborted + * A timer that expired will return no error. + * + * @param duration Length of time to wait in milliseconds + * @param callback The function to call back when the timer has expired + * @return A handle that can be used to cancel the timer if it is no longer + * needed. + */ + timer_ptr set_timer(long duration, timer_handler callback) { + timer_ptr new_timer = lib::make_shared( + *m_io_service, + lib::asio::milliseconds(duration) + ); + + new_timer->async_wait( + lib::bind( + &type::handle_timer, + this, + new_timer, + callback, + lib::placeholders::_1 + ) + ); + + return new_timer; + } + + /// Timer handler + /** + * The timer pointer is included to ensure the timer isn't destroyed until + * after it has expired. + * + * @param t Pointer to the timer in question + * @param callback The function to call back + * @param ec A status code indicating an error, if any. 
+ */ + void handle_timer(timer_ptr, timer_handler callback, + lib::asio::error_code const & ec) + { + if (ec) { + if (ec == lib::asio::error::operation_aborted) { + callback(make_error_code(transport::error::operation_aborted)); + } else { + m_elog->write(log::elevel::info, + "asio handle_timer error: "+ec.message()); + log_err(log::elevel::info,"asio handle_timer",ec); + callback(ec); + } + } else { + callback(lib::error_code()); + } + } + + /// Accept the next connection attempt and assign it to con (exception free) + /** + * @param tcon The connection to accept into. + * @param callback The function to call when the operation is complete. + * @param ec A status code indicating an error, if any. + */ + void async_accept(transport_con_ptr tcon, accept_handler callback, + lib::error_code & ec) + { + if (m_state != LISTENING) { + using websocketpp::error::make_error_code; + ec = make_error_code(websocketpp::error::async_accept_not_listening); + return; + } + + m_alog->write(log::alevel::devel, "asio::async_accept"); + + if (config::enable_multithreading) { + m_acceptor->async_accept( + tcon->get_raw_socket(), + tcon->get_strand()->wrap(lib::bind( + &type::handle_accept, + this, + callback, + lib::placeholders::_1 + )) + ); + } else { + m_acceptor->async_accept( + tcon->get_raw_socket(), + lib::bind( + &type::handle_accept, + this, + callback, + lib::placeholders::_1 + ) + ); + } + } + + /// Accept the next connection attempt and assign it to con. + /** + * @param tcon The connection to accept into. + * @param callback The function to call when the operation is complete. + */ + void async_accept(transport_con_ptr tcon, accept_handler callback) { + lib::error_code ec; + async_accept(tcon,callback,ec); + if (ec) { throw exception(ec); } + } +protected: + /// Initialize logging + /** + * The loggers are located in the main endpoint class. As such, the + * transport doesn't have direct access to them. 
This method is called + * by the endpoint constructor to allow shared logging from the transport + * component. These are raw pointers to member variables of the endpoint. + * In particular, they cannot be used in the transport constructor as they + * haven't been constructed yet, and cannot be used in the transport + * destructor as they will have been destroyed by then. + */ + void init_logging(alog_type* a, elog_type* e) { + m_alog = a; + m_elog = e; + } + + void handle_accept(accept_handler callback, lib::asio::error_code const & + asio_ec) + { + lib::error_code ret_ec; + + m_alog->write(log::alevel::devel, "asio::handle_accept"); + + if (asio_ec) { + if (asio_ec == lib::asio::errc::operation_canceled) { + ret_ec = make_error_code(websocketpp::error::operation_canceled); + } else { + log_err(log::elevel::info,"asio handle_accept",asio_ec); + ret_ec = asio_ec; + } + } + + callback(ret_ec); + } + + /// Asio connect timeout handler + /** + * The timer pointer is included to ensure the timer isn't destroyed until + * after it has expired. + * + * @param tcon Pointer to the transport connection that is being connected + * @param con_timer Pointer to the timer in question + * @param callback The function to call back + * @param ec A status code indicating an error, if any. 
+ */ + void handle_connect_timeout(transport_con_ptr tcon, timer_ptr, + connect_handler callback, lib::error_code const & ec) + { + lib::error_code ret_ec; + + if (ec) { + if (ec == transport::error::operation_aborted) { + m_alog->write(log::alevel::devel, + "asio handle_connect_timeout timer cancelled"); + return; + } + + log_err(log::elevel::devel,"asio handle_connect_timeout",ec); + ret_ec = ec; + } else { + ret_ec = make_error_code(transport::error::timeout); + } + + m_alog->write(log::alevel::devel,"TCP connect timed out"); + tcon->cancel_socket_checked(); + callback(ret_ec); + } + + void handle_connect(transport_con_ptr tcon, timer_ptr con_timer, + connect_handler callback, lib::asio::error_code const & ec) + { + if (ec == lib::asio::error::operation_aborted || + lib::asio::is_neg(con_timer->expires_from_now())) + { + m_alog->write(log::alevel::devel,"async_connect cancelled"); + return; + } + + con_timer->cancel(); + + if (ec) { + log_err(log::elevel::info,"asio async_connect",ec); + callback(ec); + return; + } + + if (m_alog->static_test(log::alevel::devel)) { + m_alog->write(log::alevel::devel, + "Async connect to "+tcon->get_remote_endpoint()+" successful."); + } + + callback(lib::error_code()); + } + + /// Initialize a connection + /** + * init is called by an endpoint once for each newly created connection. + * It's purpose is to give the transport policy the chance to perform any + * transport specific initialization that couldn't be done via the default + * constructor. + * + * @param tcon A pointer to the transport portion of the connection. 
+ * + * @return A status code indicating the success or failure of the operation + */ + lib::error_code init(transport_con_ptr tcon) { + m_alog->write(log::alevel::devel, "transport::asio::init"); + + lib::error_code ec; + + ec = tcon->init_asio(m_io_service); + if (ec) {return ec;} + + tcon->set_tcp_pre_init_handler(m_tcp_pre_init_handler); + tcon->set_tcp_post_init_handler(m_tcp_post_init_handler); + + return lib::error_code(); + } +private: + /// Convenience method for logging the code and message for an error_code + template + void log_err(log::level l, char const * msg, error_type const & ec) { + std::stringstream s; + s << msg << " error: " << ec << " (" << ec.message() << ")"; + m_elog->write(l,s.str()); + } + + enum state { + UNINITIALIZED = 0, + READY = 1, + LISTENING = 2 + }; + + // Handlers + tcp_init_handler m_tcp_pre_init_handler; + tcp_init_handler m_tcp_post_init_handler; + + // Network Resources + io_service_ptr m_io_service; + acceptor_ptr m_acceptor; + work_ptr m_work; + + elog_type* m_elog; + alog_type* m_alog; + + // Transport state + state m_state; +}; + +} // namespace asio +} // namespace transport +} // namespace websocketpp From 1b1c307cc5dc988471e1bb5c9833b48d9abdc94d Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Sun, 12 Aug 2018 15:55:46 -0400 Subject: [PATCH 009/194] Add configurations for http_plugin's unix socket support Also add customization method for defaults based on nodeos vs keosd --- plugins/http_plugin/http_plugin.cpp | 87 ++++++++++++++----- .../include/eosio/http_plugin/http_plugin.hpp | 16 ++++ programs/keosd/CMakeLists.txt | 3 + programs/keosd/config.hpp.in | 11 +++ programs/keosd/main.cpp | 6 ++ programs/nodeos/main.cpp | 5 ++ 6 files changed, 108 insertions(+), 20 deletions(-) create mode 100644 programs/keosd/config.hpp.in diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index 3c79a016c86..2f344d82b87 100644 --- 
a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -44,6 +44,11 @@ namespace eosio { using std::shared_ptr; using websocketpp::connection_hdl; + static http_plugin_defaults current_http_plugin_defaults; + + void http_plugin::set_defaults(const http_plugin_defaults config) { + current_http_plugin_defaults = config; + } namespace detail { @@ -132,7 +137,6 @@ namespace eosio { size_t max_body_size; websocket_server_type server; - websocket_local_server_type local_server; optional https_listen_endpoint; string https_cert_chain; @@ -140,9 +144,16 @@ namespace eosio { websocket_server_tls_type https_server; + optional unix_endpoint; + websocket_local_server_type unix_server; + bool validate_host; set valid_hosts; + string unix_socket_path_option_name = "unix-socket-path"; + string http_server_address_option_name = "http-server-address"; + string https_server_address_option_name = "https-server-address"; + bool host_port_is_valid( const std::string& header_host_port, const string& endpoint_local_host_port ) { return !validate_host || header_host_port == endpoint_local_host_port || valid_hosts.find(header_host_port) != valid_hosts.end(); } @@ -318,6 +329,13 @@ namespace eosio { valid_hosts.emplace(host + ":" + resolved_port_str); } + void mangle_option_names() { + if(current_http_plugin_defaults.address_config_prefix.empty()) + return; + unix_socket_path_option_name.insert(0, current_http_plugin_defaults.address_config_prefix+"-"); + http_server_address_option_name.insert(0, current_http_plugin_defaults.address_config_prefix+"-"); + https_server_address_option_name.insert(0, current_http_plugin_defaults.address_config_prefix+"-"); + } }; template<> @@ -329,11 +347,23 @@ namespace eosio { http_plugin::~http_plugin(){} void http_plugin::set_program_options(options_description&, options_description& cfg) { - cfg.add_options() - ("http-server-address", bpo::value()->default_value("127.0.0.1:8888"), - "The local IP and port to listen for incoming 
http connections; set blank to disable.") + my->mangle_option_names(); + if(current_http_plugin_defaults.default_unix_socket_path.length()) + cfg.add_options() + (my->unix_socket_path_option_name.c_str(), bpo::value()->default_value(current_http_plugin_defaults.default_unix_socket_path), + "The filename (relative to data-dir) to create a unix socket for HTTP RPC; set blank to disable."); + + if(current_http_plugin_defaults.default_http_port) + cfg.add_options() + (my->http_server_address_option_name.c_str(), bpo::value()->default_value("127.0.0.1:" + std::to_string(current_http_plugin_defaults.default_http_port)), + "The local IP and port to listen for incoming http connections; set blank to disable."); + else + cfg.add_options() + (my->http_server_address_option_name.c_str(), bpo::value(), + "The local IP and port to listen for incoming http connections; leave blank to disable."); - ("https-server-address", bpo::value(), + cfg.add_options() + (my->https_server_address_option_name.c_str(), bpo::value(), "The local IP and port to listen for incoming https connections; leave blank to disable.") ("https-certificate-chain-file", bpo::value(), @@ -382,8 +412,8 @@ namespace eosio { } tcp::resolver resolver( app().get_io_service()); - if( options.count( "http-server-address" ) && options.at( "http-server-address" ).as().length()) { - string lipstr = options.at( "http-server-address" ).as(); + if( options.count( my->http_server_address_option_name ) && options.at( my->http_server_address_option_name ).as().length()) { + string lipstr = options.at( my->http_server_address_option_name ).as(); string host = lipstr.substr( 0, lipstr.find( ':' )); string port = lipstr.substr( host.size() + 1, lipstr.size()); tcp::resolver::query query( tcp::v4(), host.c_str(), port.c_str()); @@ -401,7 +431,14 @@ namespace eosio { } } - if( options.count( "https-server-address" ) && options.at( "https-server-address" ).as().length()) { + if( options.count( my->unix_socket_path_option_name ) && 
!options.at( my->unix_socket_path_option_name ).as().empty()) { + boost::filesystem::path sock_path = options.at(my->unix_socket_path_option_name).as(); + if (sock_path.is_relative()) + sock_path = app().data_dir() / sock_path; + my->unix_endpoint = asio::local::stream_protocol::endpoint(sock_path.string()); + } + + if( options.count( my->https_server_address_option_name ) && options.at( my->https_server_address_option_name ).as().length()) { if( !options.count( "https-certificate-chain-file" ) || options.at( "https-certificate-chain-file" ).as().empty()) { elog( "https-certificate-chain-file is required for HTTPS" ); @@ -413,7 +450,7 @@ namespace eosio { return; } - string lipstr = options.at( "https-server-address" ).as(); + string lipstr = options.at( my->https_server_address_option_name ).as(); string host = lipstr.substr( 0, lipstr.find( ':' )); string port = lipstr.substr( host.size() + 1, lipstr.size()); tcp::resolver::query query( tcp::v4(), host.c_str(), port.c_str()); @@ -461,17 +498,27 @@ namespace eosio { } } -//disabled until configuration items sorted out -#if 0 - my->local_server.clear_access_channels(websocketpp::log::alevel::all); - my->local_server.init_asio(&app().get_io_service()); - my->local_server.set_max_http_body_size(my->max_body_size); - my->local_server.listen(boost::asio::local::stream_protocol::endpoint("/tmp/test")); - my->local_server.set_http_handler([&](connection_hdl hdl) { - my->handle_http_request( my->local_server.get_con_from_hdl(hdl)); - }); - my->local_server.start_accept(); -#endif + if(my->unix_endpoint) { + try { + my->unix_server.clear_access_channels(websocketpp::log::alevel::all); + my->unix_server.init_asio(&app().get_io_service()); + my->unix_server.set_max_http_body_size(my->max_body_size); + my->unix_server.listen(*my->unix_endpoint); + my->unix_server.set_http_handler([&](connection_hdl hdl) { + my->handle_http_request( my->unix_server.get_con_from_hdl(hdl)); + }); + my->unix_server.start_accept(); + } catch ( 
const fc::exception& e ){ + elog( "unix socket service failed to start: ${e}", ("e",e.to_detail_string())); + throw; + } catch ( const std::exception& e ){ + elog( "unix socket service failed to start: ${e}", ("e",e.what())); + throw; + } catch (...) { + elog("error thrown from unix socket io service"); + throw; + } + } if(my->https_listen_endpoint) { try { diff --git a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp index b70768c9671..e78300c6240 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp @@ -40,6 +40,19 @@ namespace eosio { */ using api_description = std::map; + struct http_plugin_defaults { + //If not empty, this string is prepended on to the various configuration + // items for setting listen addresses + string address_config_prefix; + //If empty, unix socket support will be completely disabled. If not empty, + // unix socket support is enabled with the given default path (treated relative + // to the datadir) + string default_unix_socket_path; + //If non 0, HTTP will be enabled by default on the given port number. If + // 0, HTTP will not be enabled by default + uint16_t default_http_port{0}; + }; + /** * This plugin starts an HTTP server and dispatches queries to * registered handles based upon URL. 
The handler is passed the @@ -60,6 +73,9 @@ namespace eosio { http_plugin(); virtual ~http_plugin(); + //must be called before initialize + static void set_defaults(const http_plugin_defaults config); + APPBASE_PLUGIN_REQUIRES() virtual void set_program_options(options_description&, options_description& cfg) override; diff --git a/programs/keosd/CMakeLists.txt b/programs/keosd/CMakeLists.txt index ac434e92c71..a332f8e26b1 100644 --- a/programs/keosd/CMakeLists.txt +++ b/programs/keosd/CMakeLists.txt @@ -9,11 +9,14 @@ if( GPERFTOOLS_FOUND ) list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc ) endif() +configure_file(config.hpp.in config.hpp ESCAPE_QUOTES) + target_link_libraries( ${KEY_STORE_EXECUTABLE_NAME} PRIVATE appbase PRIVATE wallet_api_plugin wallet_plugin PRIVATE http_plugin PRIVATE eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) +target_include_directories(${KEY_STORE_EXECUTABLE_NAME} PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) mas_sign(${KEY_STORE_EXECUTABLE_NAME}) diff --git a/programs/keosd/config.hpp.in b/programs/keosd/config.hpp.in new file mode 100644 index 00000000000..a7d34f390dc --- /dev/null +++ b/programs/keosd/config.hpp.in @@ -0,0 +1,11 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + * + * \warning This file is machine generated. DO NOT EDIT. See config.hpp.in for changes. 
+ */ +#pragma once + +namespace eosio { namespace keosd { namespace config { + const string key_store_executable_name = "${KEY_STORE_EXECUTABLE_NAME}"; +}}} diff --git a/programs/keosd/main.cpp b/programs/keosd/main.cpp index ce1588fccd6..cdc2300a76b 100644 --- a/programs/keosd/main.cpp +++ b/programs/keosd/main.cpp @@ -14,6 +14,7 @@ #include #include +#include "config.hpp" using namespace appbase; using namespace eosio; @@ -39,6 +40,11 @@ int main(int argc, char** argv) bfs::path home = determine_home_directory(); app().set_default_data_dir(home / "eosio-wallet"); app().set_default_config_dir(home / "eosio-wallet"); + http_plugin::set_defaults({ + .address_config_prefix = keosd::config::key_store_executable_name, + .default_unix_socket_path = keosd::config::key_store_executable_name + ".sock", + .default_http_port = 0 + }); app().register_plugin(); if(!app().initialize(argc, argv)) return -1; diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 02c55ae9ff5..f17aa231105 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -99,6 +99,11 @@ int main(int argc, char** argv) auto root = fc::app_path(); app().set_default_data_dir(root / "eosio/nodeos/data" ); app().set_default_config_dir(root / "eosio/nodeos/config" ); + http_plugin::set_defaults({ + .address_config_prefix = "", + .default_unix_socket_path = "", + .default_http_port = 8888 + }); if(!app().initialize(argc, argv)) return INITIALIZE_FAIL; initialize_logging(); From d269cd46175212f8f79c6f044d478abc108161dc Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Sun, 12 Aug 2018 16:15:29 -0400 Subject: [PATCH 010/194] Make cleos use keosd via unix socket by default --- programs/cleos/main.cpp | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index e478f42a2bd..85a7557fc08 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ 
-72,6 +72,7 @@ Usage: ./cleos create account [OPTIONS] creator name OwnerKey ActiveKey ``` */ +#include #include #include #include @@ -148,8 +149,24 @@ FC_DECLARE_EXCEPTION( localized_exception, 10000000, "an error occured" ); FC_MULTILINE_MACRO_END \ ) +//copy pasta from keosd's main.cpp +bfs::path determine_home_directory() +{ + bfs::path home; + struct passwd* pwd = getpwuid(getuid()); + if(pwd) { + home = pwd->pw_dir; + } + else { + home = getenv("HOME"); + } + if(home.empty()) + home = "./"; + return home; +} + string url = "http://127.0.0.1:8888/"; -string wallet_url = "http://127.0.0.1:8900/"; +string wallet_url = "unix://" + (determine_home_directory() / "eosio-wallet" / (string(key_store_executable_name) + ".sock")).string(); bool no_verify = false; vector headers; From aa8a90dc40a634ddf62f03c77280dc36107f8913 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 15 Aug 2018 16:00:23 -0400 Subject: [PATCH 011/194] Allow producer plugin to specify unix communication for keosd --- plugins/producer_plugin/producer_plugin.cpp | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 0200eb8f8ba..262a884fd4f 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -17,6 +17,7 @@ #include #include +#include #include #include #include @@ -533,7 +534,13 @@ make_key_signature_provider(const private_key_type& key) { static producer_plugin_impl::signature_provider_type make_keosd_signature_provider(const std::shared_ptr& impl, const string& url_str, const public_key_type pubkey) { - auto keosd_url = fc::url(url_str); + fc::url keosd_url; + if(boost::algorithm::starts_with(url_str, "unix://")) + //send the entire string after unix:// to http_plugin. 
It'll auto-detect which part + // is the unix socket path, and which part is the url to hit on the server + keosd_url = fc::url("unix", url_str.substr(7), ostring(), ostring(), ostring(), ostring(), ovariant_object(), fc::optional()); + else + keosd_url = fc::url(url_str); std::weak_ptr weak_impl = impl; return [weak_impl, keosd_url, pubkey]( const chain::digest_type& digest ) { From bcfbdfd9f45d7585b2d743fba9fe25164385f02f Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Sat, 25 Aug 2018 20:55:11 -0400 Subject: [PATCH 012/194] Regress a few changes from UNIX socket branch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Regress a few changes from this branch until unix socket support is fully embraced — * Maintain current keosd *-server-address config names * Have keosd start HTTP server by default still (though this time on 8900) * Change cleos default URL back to http://127.0.0.1:8900 --- programs/cleos/main.cpp | 2 +- programs/keosd/main.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 85a7557fc08..adadd2c8852 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -166,7 +166,7 @@ bfs::path determine_home_directory() } string url = "http://127.0.0.1:8888/"; -string wallet_url = "unix://" + (determine_home_directory() / "eosio-wallet" / (string(key_store_executable_name) + ".sock")).string(); +string wallet_url = "http://127.0.0.1:8900/"; bool no_verify = false; vector headers; diff --git a/programs/keosd/main.cpp b/programs/keosd/main.cpp index cdc2300a76b..58a42d96b30 100644 --- a/programs/keosd/main.cpp +++ b/programs/keosd/main.cpp @@ -41,9 +41,9 @@ int main(int argc, char** argv) app().set_default_data_dir(home / "eosio-wallet"); app().set_default_config_dir(home / "eosio-wallet"); http_plugin::set_defaults({ - .address_config_prefix = 
keosd::config::key_store_executable_name, + .address_config_prefix = "", .default_unix_socket_path = keosd::config::key_store_executable_name + ".sock", - .default_http_port = 0 + .default_http_port = 8900 }); app().register_plugin(); if(!app().initialize(argc, argv)) From 17e574b3df82c61d51dfeb8a640552cc860a3ed0 Mon Sep 17 00:00:00 2001 From: Scott Sallinen Date: Sun, 26 Aug 2018 15:26:38 -0700 Subject: [PATCH 013/194] Initial get code hash --- plugins/chain_api_plugin/chain_api_plugin.cpp | 1 + plugins/chain_plugin/chain_plugin.cpp | 13 +++++++++++++ .../include/eosio/chain_plugin/chain_plugin.hpp | 12 ++++++++++++ 3 files changed, 26 insertions(+) diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp index 58098501f04..b6d36d7fa00 100644 --- a/plugins/chain_api_plugin/chain_api_plugin.cpp +++ b/plugins/chain_api_plugin/chain_api_plugin.cpp @@ -85,6 +85,7 @@ void chain_api_plugin::plugin_startup() { CHAIN_RO_CALL(get_block_header_state, 200), CHAIN_RO_CALL(get_account, 200), CHAIN_RO_CALL(get_code, 200), + CHAIN_RO_CALL(get_code_hash, 200), CHAIN_RO_CALL(get_abi, 200), CHAIN_RO_CALL(get_raw_code_and_abi, 200), CHAIN_RO_CALL(get_table_rows, 200), diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 58295d52da8..d7240ecd6e7 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1496,6 +1496,19 @@ read_only::get_code_results read_only::get_code( const get_code_params& params ) return result; } +read_only::get_code_hash_results read_only::get_code_hash( const get_code_hash_params& params )const { + get_code_hash_results result; + result.account_name = params.account_name; + const auto& d = db.db(); + const auto& accnt = d.get( params.account_name ); + + if( accnt.code.size() ) { + result.code_hash = fc::sha256::hash( accnt.code.data(), accnt.code.size() ); + } + + return result; +} + read_only::get_raw_code_and_abi_results 
read_only::get_raw_code_and_abi( const get_raw_code_and_abi_params& params)const { get_raw_code_and_abi_results result; result.account_name = params.account_name; diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 2fd665d6255..4d1abd7ede8 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -152,6 +152,15 @@ class read_only { bool code_as_wasm = false; }; + struct get_code_hash_results { + name account_name; + fc::sha256 code_hash; + }; + + struct get_code_hash_params { + name account_name; + }; + struct get_abi_results { name account_name; optional abi; @@ -173,6 +182,7 @@ class read_only { get_code_results get_code( const get_code_params& params )const; + get_code_hash_results get_code_hash( const get_code_hash_params& params )const; get_abi_results get_abi( const get_abi_params& params )const; get_raw_code_and_abi_results get_raw_code_and_abi( const get_raw_code_and_abi_params& params)const; @@ -642,9 +652,11 @@ FC_REFLECT( eosio::chain_apis::read_only::get_account_results, (core_liquid_balance)(ram_quota)(net_weight)(cpu_weight)(net_limit)(cpu_limit)(ram_usage)(permissions) (total_resources)(self_delegated_bandwidth)(refund_request)(voter_info) ) FC_REFLECT( eosio::chain_apis::read_only::get_code_results, (account_name)(code_hash)(wast)(wasm)(abi) ) +FC_REFLECT( eosio::chain_apis::read_only::get_code_hash_results, (account_name)(code_hash) ) FC_REFLECT( eosio::chain_apis::read_only::get_abi_results, (account_name)(abi) ) FC_REFLECT( eosio::chain_apis::read_only::get_account_params, (account_name) ) FC_REFLECT( eosio::chain_apis::read_only::get_code_params, (account_name)(code_as_wasm) ) +FC_REFLECT( eosio::chain_apis::read_only::get_code_hash_params, (account_name) ) FC_REFLECT( eosio::chain_apis::read_only::get_abi_params, (account_name) ) FC_REFLECT( 
eosio::chain_apis::read_only::get_raw_code_and_abi_params, (account_name) ) FC_REFLECT( eosio::chain_apis::read_only::get_raw_code_and_abi_results, (account_name)(wasm)(abi) ) From 2bf701fc5314b8b717a038f799553658d0cb66c7 Mon Sep 17 00:00:00 2001 From: Jerry <1032246642@qq.com> Date: Mon, 27 Aug 2018 19:37:55 +0800 Subject: [PATCH 014/194] add cleos set contract/code/abi --clear --- programs/cleos/main.cpp | 77 ++++++++++++++++++++++++++--------------- 1 file changed, 49 insertions(+), 28 deletions(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index e478f42a2bd..f47e6591cb9 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -553,12 +553,12 @@ chain::action create_transfer(const string& contract, const name& sender, const }; } -chain::action create_setabi(const name& account, const abi_def& abi) { +chain::action create_setabi(const name& account, const bytes& abi) { return action { tx_permission.empty() ? vector{{account,config::active_name}} : get_account_permissions(tx_permission), setabi{ .account = account, - .abi = fc::raw::pack(abi) + .abi = abi } }; } @@ -2248,45 +2248,57 @@ int main( int argc, char** argv ) { string wasmPath; string abiPath; bool shouldSend = true; + bool contract_clear = false; auto codeSubcommand = setSubcommand->add_subcommand("code", localized("Create or update the code on an account")); codeSubcommand->add_option("account", account, localized("The account to set code for"))->required(); - codeSubcommand->add_option("code-file", wasmPath, localized("The fullpath containing the contract WASM"))->required(); + codeSubcommand->add_option("code-file", wasmPath, localized("The fullpath containing the contract WASM"));//->required(); + codeSubcommand->add_flag( "-c,--clear", contract_clear, localized("Remove code on an account")); auto abiSubcommand = setSubcommand->add_subcommand("abi", localized("Create or update the abi on an account")); abiSubcommand->add_option("account", account, localized("The 
account to set the ABI for"))->required(); - abiSubcommand->add_option("abi-file", abiPath, localized("The fullpath containing the contract ABI"))->required(); + abiSubcommand->add_option("abi-file", abiPath, localized("The fullpath containing the contract ABI"));//->required(); + abiSubcommand->add_flag( "-c,--clear", contract_clear, localized("Remove abi on an account")); auto contractSubcommand = setSubcommand->add_subcommand("contract", localized("Create or update the contract on an account")); contractSubcommand->add_option("account", account, localized("The account to publish a contract for")) ->required(); - contractSubcommand->add_option("contract-dir", contractPath, localized("The path containing the .wasm and .abi")) - ->required(); + contractSubcommand->add_option("contract-dir", contractPath, localized("The path containing the .wasm and .abi")); + // ->required(); contractSubcommand->add_option("wasm-file", wasmPath, localized("The file containing the contract WASM relative to contract-dir")); // ->check(CLI::ExistingFile); auto abi = contractSubcommand->add_option("abi-file,-a,--abi", abiPath, localized("The ABI for the contract relative to contract-dir")); // ->check(CLI::ExistingFile); + contractSubcommand->add_flag( "-c,--clear", contract_clear, localized("Rmove contract on an account")); std::vector actions; auto set_code_callback = [&]() { - std::string wasm; - fc::path cpath(contractPath); + bytes code_bytes; + if(!contract_clear){ + std::string wasm; + fc::path cpath(contractPath); + + if( cpath.filename().generic_string() == "." ) cpath = cpath.parent_path(); + + if( wasmPath.empty() ) + wasmPath = (cpath / (cpath.filename().generic_string()+".wasm")).generic_string(); + else + wasmPath = (cpath / wasmPath).generic_string(); - if( cpath.filename().generic_string() == "." 
) cpath = cpath.parent_path(); + std::cerr << localized(("Reading WASM from " + wasmPath + "...").c_str()) << std::endl; + fc::read_file_contents(wasmPath, wasm); + EOS_ASSERT( !wasm.empty(), wast_file_not_found, "no wasm file found ${f}", ("f", wasmPath) ); - if( wasmPath.empty() ) - wasmPath = (cpath / (cpath.filename().generic_string()+".wasm")).generic_string(); - else - wasmPath = (cpath / wasmPath).generic_string(); + const string binary_wasm_header("\x00\x61\x73\x6d\x01\x00\x00\x00", 8); + if(wasm.compare(0, 8, binary_wasm_header)) + std::cerr << localized("WARNING: ") << wasmPath << localized(" doesn't look like a binary WASM file. Is it something else, like WAST? Trying anyways...") << std::endl; + code_bytes = bytes(wasm.begin(), wasm.end()); - std::cerr << localized(("Reading WASM from " + wasmPath + "...").c_str()) << std::endl; - fc::read_file_contents(wasmPath, wasm); - EOS_ASSERT( !wasm.empty(), wast_file_not_found, "no wasm file found ${f}", ("f", wasmPath) ); + } else { + code_bytes = bytes(); + } - const string binary_wasm_header("\x00\x61\x73\x6d\x01\x00\x00\x00", 8); - if(wasm.compare(0, 8, binary_wasm_header)) - std::cerr << localized("WARNING: ") << wasmPath << localized(" doesn't look like a binary WASM file. Is it something else, like WAST? Trying anyways...") << std::endl; - actions.emplace_back( create_setcode(account, bytes(wasm.begin(), wasm.end()) ) ); + actions.emplace_back( create_setcode(account, code_bytes ) ); if ( shouldSend ) { std::cerr << localized("Setting Code...") << std::endl; send_actions(std::move(actions), 10000, packed_transaction::zlib); @@ -2294,19 +2306,27 @@ int main( int argc, char** argv ) { }; auto set_abi_callback = [&]() { - fc::path cpath(contractPath); - if( cpath.filename().generic_string() == "." ) cpath = cpath.parent_path(); + bytes abi_bytes; + if(!contract_clear){ + fc::path cpath(contractPath); + if( cpath.filename().generic_string() == "." 
) cpath = cpath.parent_path(); + + if( abiPath.empty() ) { + abiPath = (cpath / (cpath.filename().generic_string()+".abi")).generic_string(); + } else { + abiPath = (cpath / abiPath).generic_string(); + } + + EOS_ASSERT( fc::exists( abiPath ), abi_file_not_found, "no abi file found ${f}", ("f", abiPath) ); + + abi_bytes = fc::raw::pack(fc::json::from_file(abiPath).as()); - if( abiPath.empty() ) { - abiPath = (cpath / (cpath.filename().generic_string()+".abi")).generic_string(); } else { - abiPath = (cpath / abiPath).generic_string(); + abi_bytes = bytes(); } - EOS_ASSERT( fc::exists( abiPath ), abi_file_not_found, "no abi file found ${f}", ("f", abiPath) ); - try { - actions.emplace_back( create_setabi(account, fc::json::from_file(abiPath).as()) ); + actions.emplace_back( create_setabi(account, abi_bytes) ); } EOS_RETHROW_EXCEPTIONS(abi_type_exception, "Fail to parse ABI JSON") if ( shouldSend ) { std::cerr << localized("Setting ABI...") << std::endl; @@ -2318,6 +2338,7 @@ int main( int argc, char** argv ) { add_standard_transaction_options(codeSubcommand, "account@active"); add_standard_transaction_options(abiSubcommand, "account@active"); contractSubcommand->set_callback([&] { + if(!contract_clear) EOS_ASSERT( !contractPath.empty(), contract_exception, " contract-dir is null ", ("f", contractPath) ); shouldSend = false; set_code_callback(); set_abi_callback(); From cbbfc75d907851c067179c911a7ce738b2d830e2 Mon Sep 17 00:00:00 2001 From: Travis McLane Date: Mon, 27 Aug 2018 10:21:31 -0400 Subject: [PATCH 015/194] stop creating anonymous volumes --- Docker/Dockerfile | 1 - 1 file changed, 1 deletion(-) diff --git a/Docker/Dockerfile b/Docker/Dockerfile index 92867dae89c..ffde4a65c07 100644 --- a/Docker/Dockerfile +++ b/Docker/Dockerfile @@ -21,5 +21,4 @@ COPY --from=builder /eos/Docker/nodeosd.sh /opt/eosio/bin/nodeosd.sh ENV EOSIO_ROOT=/opt/eosio RUN chmod +x /opt/eosio/bin/nodeosd.sh ENV LD_LIBRARY_PATH /usr/local/lib -VOLUME /opt/eosio/bin/data-dir ENV PATH 
/opt/eosio/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin From 265132ede8fad50aa2ada6470450596b6264d5de Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 27 Aug 2018 11:26:24 -0500 Subject: [PATCH 016/194] New test to ensure require_recipient to generator is ignored --- contracts/test_api/test_action.cpp | 11 +++++++++++ contracts/test_api/test_api.cpp | 1 + contracts/test_api/test_api.hpp | 1 + unittests/api_tests.cpp | 30 ++++++++++++++++++++++++++++++ 4 files changed, 43 insertions(+) diff --git a/contracts/test_api/test_action.cpp b/contracts/test_api/test_action.cpp index adc517453fa..74c90330bf0 100644 --- a/contracts/test_api/test_action.cpp +++ b/contracts/test_api/test_action.cpp @@ -174,6 +174,17 @@ void test_action::require_notice(uint64_t receiver, uint64_t code, uint64_t acti eosio_assert(false, "Should've failed"); } +void test_action::require_notice_tests(uint64_t receiver, uint64_t code, uint64_t action) { + eosio::print( "require_notice_tests" ); + if( receiver == N( testapi ) ) { + eosio::print( "require_recipient( N(acc5) )" ); + eosio::require_recipient( N( acc5 ) ); + } else if( receiver == N( acc5 ) ) { + eosio::print( "require_recipient( N(testapi) )" ); + eosio::require_recipient( N( testapi ) ); + } +} + void test_action::require_auth() { prints("require_auth"); eosio::require_auth( N(acc3) ); diff --git a/contracts/test_api/test_api.cpp b/contracts/test_api/test_api.cpp index 3bccd9a4a8c..e900dfd82fd 100644 --- a/contracts/test_api/test_api.cpp +++ b/contracts/test_api/test_api.cpp @@ -71,6 +71,7 @@ extern "C" { WASM_TEST_HANDLER(test_action, read_action_to_0); WASM_TEST_HANDLER(test_action, read_action_to_64k); WASM_TEST_HANDLER_EX(test_action, require_notice); + WASM_TEST_HANDLER_EX(test_action, require_notice_tests); WASM_TEST_HANDLER(test_action, require_auth); WASM_TEST_HANDLER(test_action, assert_false); WASM_TEST_HANDLER(test_action, assert_true); diff --git a/contracts/test_api/test_api.hpp 
b/contracts/test_api/test_api.hpp index b54f12bff02..18368932bcf 100644 --- a/contracts/test_api/test_api.hpp +++ b/contracts/test_api/test_api.hpp @@ -64,6 +64,7 @@ struct test_action { static void test_dummy_action(); static void test_cf_action(); static void require_notice(uint64_t receiver, uint64_t code, uint64_t action); + static void require_notice_tests(uint64_t receiver, uint64_t code, uint64_t action); static void require_auth(); static void assert_false(); static void assert_true(); diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 63eda3116b0..9ac15cc4473 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -437,6 +437,36 @@ BOOST_FIXTURE_TEST_CASE(action_tests, TESTER) { try { BOOST_REQUIRE_EQUAL( validate(), true ); } FC_LOG_AND_RETHROW() } +// test require_recipient loop (doesn't cause infinite loop) +BOOST_FIXTURE_TEST_CASE(require_notice_tests, TESTER) { try { + produce_blocks(2); + create_account( N(testapi) ); + create_account( N(acc5) ); + produce_blocks(1); + set_code( N(testapi), test_api_wast ); + set_code( N(acc5), test_api_wast ); + produce_blocks(1); + + // test require_notice + auto scope = std::vector{N(testapi)}; + auto test_require_notice = [this](auto& test, std::vector& scope){ + signed_transaction trx; + auto tm = test_api_action{}; + + action act(std::vector{{N(testapi), config::active_name}}, tm); + trx.actions.push_back(act); + + test.set_transaction_headers(trx); + trx.sign(test.get_private_key(N(testapi), "active"), control->get_chain_id()); + auto res = test.push_transaction(trx); + BOOST_CHECK_EQUAL(res->receipt->status, transaction_receipt::executed); + }; + + test_require_notice(*this, scope); // no exception expected + + } FC_LOG_AND_RETHROW() } + + /************************************************************************************* * context free action tests *************************************************************************************/ From 
6798ca9e5f1f9639784c1635728d7c506d39f121 Mon Sep 17 00:00:00 2001 From: Travis McLane Date: Mon, 27 Aug 2018 12:56:49 -0400 Subject: [PATCH 017/194] remove anonymous volume from dev container --- Docker/dev/Dockerfile | 1 - 1 file changed, 1 deletion(-) diff --git a/Docker/dev/Dockerfile b/Docker/dev/Dockerfile index 2b7bf84987f..7df9a182b37 100644 --- a/Docker/dev/Dockerfile +++ b/Docker/dev/Dockerfile @@ -14,5 +14,4 @@ RUN pip3 install numpy ENV EOSIO_ROOT=/opt/eosio RUN chmod +x /opt/eosio/bin/nodeosd.sh ENV LD_LIBRARY_PATH /usr/local/lib -VOLUME /opt/eosio/bin/data-dir ENV PATH /opt/eosio/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin From f5ccf430d30d856df8c449e2833ac166b560a417 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Mon, 27 Aug 2018 17:14:01 -0500 Subject: [PATCH 018/194] Correct cleos get table help text from 'contract' to 'account'. --- programs/cleos/main.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index e478f42a2bd..f9af7ab84d8 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -1971,7 +1971,7 @@ int main( int argc, char** argv ) { uint32_t limit = 10; string index_position; auto getTable = get->add_subcommand( "table", localized("Retrieve the contents of a database table"), false); - getTable->add_option( "contract", code, localized("The contract who owns the table") )->required(); + getTable->add_option( "account", code, localized("The account who owns the table") )->required(); getTable->add_option( "scope", scope, localized("The scope within the contract in which the table is found") )->required(); getTable->add_option( "table", table, localized("The name of the table as specified by the contract abi") )->required(); getTable->add_option( "-b,--binary", binary, localized("Return the value as BINARY rather than using abi to interpret as JSON") ); From 505df507524aeef9950f4bca9abe6b34f29504ec Mon Sep 17 00:00:00 2001 From: 
Travis McLane Date: Tue, 28 Aug 2018 12:46:07 -0500 Subject: [PATCH 019/194] ensure data-dir exists --- Docker/nodeosd.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Docker/nodeosd.sh b/Docker/nodeosd.sh index b808ccad460..870548d6b6b 100755 --- a/Docker/nodeosd.sh +++ b/Docker/nodeosd.sh @@ -1,6 +1,10 @@ #!/bin/sh cd /opt/eosio/bin +if [ ! -d "/opt/eosio/bin/data-dir" ]; then + mkdir /opt/eosio/bin/data-dir +fi + if [ -f '/opt/eosio/bin/data-dir/config.ini' ]; then echo else From 184d24b74879ae516b2fb5bd520ef3fcc3d27f16 Mon Sep 17 00:00:00 2001 From: Marlon Williams Date: Tue, 28 Aug 2018 10:09:51 -0700 Subject: [PATCH 020/194] Simplify Wallet Tools EOSIO Blockchain Detection --- plugins/chain_plugin/chain_plugin.cpp | 2 ++ .../chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 58295d52da8..e724a2e65b3 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -947,6 +947,8 @@ read_only::get_info_results read_only::get_info(const read_only::get_info_params //std::bitset<64>(db.get_dynamic_global_properties().recent_slots_filled).to_string(), //__builtin_popcountll(db.get_dynamic_global_properties().recent_slots_filled) / 64.0, app().version_string(), + symbol().name(), + symbol().precision(), }; } diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 2fd665d6255..9eff81faa3d 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -97,6 +97,8 @@ class read_only { //string recent_slots; //double participation_rate = 0; optional server_version_string; + string core_symbol; + uint64_t precision; }; get_info_results get_info(const get_info_params&) const; @@ -615,7 +617,7 @@ 
class chain_plugin : public plugin { FC_REFLECT( eosio::chain_apis::permission, (perm_name)(parent)(required_auth) ) FC_REFLECT(eosio::chain_apis::empty, ) FC_REFLECT(eosio::chain_apis::read_only::get_info_results, -(server_version)(chain_id)(head_block_num)(last_irreversible_block_num)(last_irreversible_block_id)(head_block_id)(head_block_time)(head_block_producer)(virtual_block_cpu_limit)(virtual_block_net_limit)(block_cpu_limit)(block_net_limit)(server_version_string) ) +(server_version)(chain_id)(head_block_num)(last_irreversible_block_num)(last_irreversible_block_id)(head_block_id)(head_block_time)(head_block_producer)(virtual_block_cpu_limit)(virtual_block_net_limit)(block_cpu_limit)(block_net_limit)(server_version_string)(core_symbol)(precision) ) FC_REFLECT(eosio::chain_apis::read_only::get_block_params, (block_num_or_id)) FC_REFLECT(eosio::chain_apis::read_only::get_block_header_state_params, (block_num_or_id)) From 33b1f074484cbab0ec0eeb87b8e1fa8c5b8a3914 Mon Sep 17 00:00:00 2001 From: Marlon Williams Date: Tue, 28 Aug 2018 14:53:50 -0700 Subject: [PATCH 021/194] Update chain_plugin.hpp Changing precision to core_symbol_precision per request --- .../chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 9eff81faa3d..891f63b4f70 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -98,7 +98,7 @@ class read_only { //double participation_rate = 0; optional server_version_string; string core_symbol; - uint64_t precision; + uint64_t core_symbol_precision; }; get_info_results get_info(const get_info_params&) const; @@ -617,7 +617,7 @@ class chain_plugin : public plugin { FC_REFLECT( eosio::chain_apis::permission, (perm_name)(parent)(required_auth) ) 
FC_REFLECT(eosio::chain_apis::empty, ) FC_REFLECT(eosio::chain_apis::read_only::get_info_results, -(server_version)(chain_id)(head_block_num)(last_irreversible_block_num)(last_irreversible_block_id)(head_block_id)(head_block_time)(head_block_producer)(virtual_block_cpu_limit)(virtual_block_net_limit)(block_cpu_limit)(block_net_limit)(server_version_string)(core_symbol)(precision) ) +(server_version)(chain_id)(head_block_num)(last_irreversible_block_num)(last_irreversible_block_id)(head_block_id)(head_block_time)(head_block_producer)(virtual_block_cpu_limit)(virtual_block_net_limit)(block_cpu_limit)(block_net_limit)(server_version_string)(core_symbol)(core_symbol_precision) ) FC_REFLECT(eosio::chain_apis::read_only::get_block_params, (block_num_or_id)) FC_REFLECT(eosio::chain_apis::read_only::get_block_header_state_params, (block_num_or_id)) From b16050a112fdbed32110ead8a6fcfbc29f89bd72 Mon Sep 17 00:00:00 2001 From: Paul Calabrese Date: Wed, 29 Aug 2018 09:21:20 -0500 Subject: [PATCH 022/194] Add testing for additional launcher features --- plugins/history_plugin/history_plugin.cpp | 2 +- programs/eosio-launcher/main.cpp | 93 ++++++-- scripts/eosio-tn_bounce.sh | 2 +- scripts/eosio-tn_roll.sh | 4 +- scripts/eosio-tn_up.sh | 12 +- tests/CMakeLists.txt | 1 + tests/Cluster.py | 142 +++++++++++- tests/Node.py | 2 +- tests/launcher_test.py | 249 ++++++++++++++++++++++ tests/nodeos_run_test.py | 4 +- tests/nodeos_under_min_avail_ram.py | 2 +- tests/nodeos_voting_test.py | 2 +- 12 files changed, 471 insertions(+), 44 deletions(-) create mode 100755 tests/launcher_test.py diff --git a/plugins/history_plugin/history_plugin.cpp b/plugins/history_plugin/history_plugin.cpp index ea0588aa9fc..6888f819b2a 100644 --- a/plugins/history_plugin/history_plugin.cpp +++ b/plugins/history_plugin/history_plugin.cpp @@ -313,7 +313,7 @@ namespace eosio { if( options.count( "filter-on" )) { auto fo = options.at( "filter-on" ).as>(); for( auto& s : fo ) { - if( s == "*" ) { + if( s == "*" || 
s == "\"*\"" ) { my->bypass_filter = true; wlog( "--filter-on * enabled. This can fill shared_mem, causing nodeos to stop." ); break; diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 0fe9652d49b..d5ae379b4b0 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -17,6 +17,7 @@ #pragma GCC diagnostic ignored "-Wunused-result" #include #pragma GCC diagnostic pop +#include #include #include #include @@ -162,7 +163,7 @@ class host_def { return base_http_port - 100; } - bool is_local( ) { + bool is_local( ) const { return local_id.contains( host_name ); } @@ -1650,14 +1651,30 @@ launcher_def::bounce (const string& node_numbers) { const host_def& host = node_pair.first; const eosd_def& node = node_pair.second; string node_num = node.name.substr( node.name.length() - 2 ); - string cmd = "cd " + host.eosio_home + "; " - + "export EOSIO_HOME=" + host.eosio_home + string("; ") - + "export EOSIO_NODE=" + node_num + "; " - + "./scripts/eosio-tn_bounce.sh " + eosd_extra_args; cout << "Bouncing " << node.name << endl; - if (!do_ssh(cmd, host.host_name)) { - cerr << "Unable to bounce " << node.name << endl; - exit (-1); + if (!host.is_local()) { + string cmd = "cd " + host.eosio_home + "; " + + "export EOSIO_HOME=" + host.eosio_home + string("; ") + + "export EOSIO_NODE=" + node_num + "; " + + "./scripts/eosio-tn_bounce.sh " + eosd_extra_args; + if (!do_ssh(cmd, host.host_name)) { + cerr << "Unable to bounce " << node.name << endl; + exit (-1); + } + } + else { + string cmd = "./scripts/eosio-tn_bounce.sh " + eosd_extra_args; + bp::child c(cmd, + bp::env["EOSIO_HOME"] = host.eosio_home, + bp::env["EOSIO_NODE"] = node_num ); + + if(!c.running()) { + cerr << "child not running after spawn " << cmd << endl; + for (int i = 0; i > 0; i++) { + if (c.running () ) break; + } + } + c.wait(); } } } @@ -1669,15 +1686,32 @@ launcher_def::down (const string& node_numbers) { const host_def& host = node_pair.first; const 
eosd_def& node = node_pair.second; string node_num = node.name.substr( node.name.length() - 2 ); - string cmd = "cd " + host.eosio_home + "; " - + "export EOSIO_HOME=" + host.eosio_home + "; " - + "export EOSIO_NODE=" + node_num + "; " - + "export EOSIO_TN_RESTART_CONFIG_DIR=" + node.config_dir_name + "; " - + "./scripts/eosio-tn_down.sh"; cout << "Taking down " << node.name << endl; - if (!do_ssh(cmd, host.host_name)) { - cerr << "Unable to down " << node.name << endl; - exit (-1); + if (!host.is_local()) { + string cmd = "cd " + host.eosio_home + "; " + + "export EOSIO_HOME=" + host.eosio_home + "; " + + "export EOSIO_NODE=" + node_num + "; " + + "export EOSIO_TN_RESTART_CONFIG_DIR=" + node.config_dir_name + "; " + + "./scripts/eosio-tn_down.sh"; + if (!do_ssh(cmd, host.host_name)) { + cerr << "Unable to down " << node.name << endl; + exit (-1); + } + } + else { + string cmd = "./scripts/eosio-tn_down.sh "; + bp::child c(cmd, + bp::env["EOSIO_HOME"] = host.eosio_home, + bp::env["EOSIO_NODE"] = node_num, + bp::env["EOSIO_TN_RESTART_CONFIG_DIR"] = node.config_dir_name ); + + if(!c.running()) { + cerr << "child not running after spawn " << cmd << endl; + for (int i = 0; i > 0; i++) { + if (c.running () ) break; + } + } + c.wait(); } } } @@ -1689,12 +1723,27 @@ launcher_def::roll (const string& host_names) { for (string host_name: hosts) { cout << "Rolling " << host_name << endl; auto host = find_host_by_name_or_address(host_name); - string cmd = "cd " + host->eosio_home + "; " - + "export EOSIO_HOME=" + host->eosio_home + "; " - + "./scripts/eosio-tn_roll.sh"; - if (!do_ssh(cmd, host_name)) { - cerr << "Unable to roll " << host << endl; - exit (-1); + if (!host->is_local()) { + string cmd = "cd " + host->eosio_home + "; " + + "export EOSIO_HOME=" + host->eosio_home + "; " + + "./scripts/eosio-tn_roll.sh"; + if (!do_ssh(cmd, host_name)) { + cerr << "Unable to roll " << host << endl; + exit (-1); + } + } + else { + string cmd = "./scripts/eosio-tn_roll.sh "; + 
bp::child c(cmd, + bp::env["EOSIO_HOME"] = host->eosio_home ); + + if(!c.running()) { + cerr << "child not running after spawn " << cmd << endl; + for (int i = 0; i > 0; i++) { + if (c.running () ) break; + } + } + c.wait(); } } } diff --git a/scripts/eosio-tn_bounce.sh b/scripts/eosio-tn_bounce.sh index 7062836c92c..55ef1d78159 100755 --- a/scripts/eosio-tn_bounce.sh +++ b/scripts/eosio-tn_bounce.sh @@ -41,4 +41,4 @@ else fi bash $EOSIO_HOME/scripts/eosio-tn_down.sh -bash $EOSIO_HOME/scripts/eosio-tn_up.sh $* +bash $EOSIO_HOME/scripts/eosio-tn_up.sh "$*" diff --git a/scripts/eosio-tn_roll.sh b/scripts/eosio-tn_roll.sh index 7c8f665c880..1b131edb0fa 100755 --- a/scripts/eosio-tn_roll.sh +++ b/scripts/eosio-tn_roll.sh @@ -82,10 +82,10 @@ cp $SDIR/$RD/$prog $RD/$prog if [ $DD = "all" ]; then for EOSIO_RESTART_DATA_DIR in `ls -d var/lib/node_??`; do - bash $EOSIO_HOME/scripts/eosio-tn_up.sh $* + bash $EOSIO_HOME/scripts/eosio-tn_up.sh "$*" done else - bash $EOSIO_HOME/scripts/eosio-tn_up.sh $* + bash $EOSIO_HOME/scripts/eosio-tn_up.sh "$*" fi unset EOSIO_RESTART_DATA_DIR diff --git a/scripts/eosio-tn_up.sh b/scripts/eosio-tn_up.sh index 895322a5eee..058ab16ed90 100755 --- a/scripts/eosio-tn_up.sh +++ b/scripts/eosio-tn_up.sh @@ -9,6 +9,8 @@ connected="0" rundir=programs/nodeos prog=nodeos +# Quote any args that are "*", so they are not expanded +qargs=`echo "$*" | sed -e 's/ \* / "*" /' -e 's/ \*$/ "*"/'` if [ "$PWD" != "$EOSIO_HOME" ]; then echo $0 must only be run from $EOSIO_HOME @@ -33,8 +35,8 @@ rm $datadir/stderr.txt ln -s $log $datadir/stderr.txt relaunch() { - echo "$rundir/$prog $* --data-dir $datadir --config-dir etc/eosio/node_$EOSIO_NODE > $datadir/stdout.txt 2>> $datadir/$log " - nohup $rundir/$prog $* --data-dir $datadir --config-dir etc/eosio/node_$EOSIO_NODE > $datadir/stdout.txt 2>> $datadir/$log & + echo "$rundir/$prog $qargs $* --data-dir $datadir --config-dir etc/eosio/node_$EOSIO_NODE > $datadir/stdout.txt 2>> $datadir/$log " + nohup $rundir/$prog 
$qargs $* --data-dir $datadir --config-dir etc/eosio/node_$EOSIO_NODE > $datadir/stdout.txt 2>> $datadir/$log & pid=$! echo pid = $pid echo $pid > $datadir/$prog.pid @@ -56,7 +58,7 @@ relaunch() { if [ -z "$EOSIO_LEVEL" ]; then echo starting with no modifiers - relaunch $* + relaunch if [ "$connected" -eq 0 ]; then EOSIO_LEVEL=replay else @@ -66,7 +68,7 @@ fi if [ "$EOSIO_LEVEL" == replay ]; then echo starting with replay - relaunch $* --hard-replay-blockchain + relaunch --hard-replay-blockchain if [ "$connected" -eq 0 ]; then EOSIO_LEVEL=resync else @@ -75,5 +77,5 @@ if [ "$EOSIO_LEVEL" == replay ]; then fi if [ "$EOSIO_LEVEL" == resync ]; then echo starting with delete-all-blocks - relaunch $* --delete-all-blocks + relaunch --delete-all-blocks fi diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 86c6e97cb36..e70ce002336 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -40,6 +40,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_under_min_avail_ram.py ${CMAKE configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_voting_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_voting_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/consensus-validation-malicious-producers.py ${CMAKE_CURRENT_BINARY_DIR}/consensus-validation-malicious-producers.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/validate-dirty-db.py ${CMAKE_CURRENT_BINARY_DIR}/validate-dirty-db.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/launcher_test.py ${CMAKE_CURRENT_BINARY_DIR}/launcher_test.py COPYONLY) #To run plugin_test with all log from blockchain displayed, put --verbose after --, i.e. 
plugin_test -- --verbose add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_output) diff --git a/tests/Cluster.py b/tests/Cluster.py index 73be2edde19..a3554551220 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -30,6 +30,7 @@ class Cluster(object): __localHost="localhost" __BiosHost="localhost" __BiosPort=8788 + __LauncherCmdArr=[] # pylint: disable=too-many-arguments # walletd [True|False] Is keosd running. If not load the wallet plugin @@ -97,14 +98,19 @@ def setWalletMgr(self, walletMgr): # pylint: disable=too-many-branches # pylint: disable=too-many-statements def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontKill=False - , dontBootstrap=False, totalProducers=None, extraNodeosArgs=None): + , dontBootstrap=False, totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True): """Launch cluster. pnodes: producer nodes count totalNodes: producer + non-producer nodes count - prodCount: producers per prodcuer node count + prodCount: producers per producer node count topo: cluster topology (as defined by launcher) delay: delay between individual nodes launch (as defined by launcher) delay 0 exposes a bootstrap bug where producer handover may have a large gap confusing nodes and bringing system to a halt. + dontBootstrap: When true, don't do any bootstrapping at all. + onlyBios: When true, only loads the bios contract (and not more full bootstrapping). + useBiosBootFile: determines which of two bootstrap methods is used (when both dontBootstrap and onlyBios are false). + The default value of true uses the bios_boot.sh file generated by the launcher. + A value of false uses manual bootstrapping in this script, which does not do things like stake votes for producers. """ if not self.localCluster: Utils.Print("WARNING: Cluster not local, not launching %s." 
% (Utils.EosServerName)) @@ -144,6 +150,7 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne cmdArr.append("--nodeos") cmdArr.append(nodeosArgs) + Cluster.__LauncherCmdArr = cmdArr.copy() s=" ".join(cmdArr) if Utils.Debug: Utils.Print("cmd: %s" % (s)) if 0 != subprocess.call(cmdArr): @@ -180,10 +187,16 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne return True Utils.Print("Bootstrap cluster.") - self.biosNode=Cluster.bootstrap(totalNodes, prodCount, Cluster.__BiosHost, Cluster.__BiosPort, dontKill, onlyBios) - if self.biosNode is None: - Utils.Print("ERROR: Bootstrap failed.") - return False + if onlyBios or not useBiosBootFile: + self.biosNode=Cluster.bootstrap(totalNodes, prodCount, Cluster.__BiosHost, Cluster.__BiosPort, dontKill, onlyBios) + if self.biosNode is None: + Utils.Print("ERROR: Bootstrap failed.") + return False + else: + self.biosNode=Cluster.bios_bootstrap(totalNodes, Cluster.__BiosHost, Cluster.__BiosPort, dontKill) + if self.biosNode is None: + Utils.Print("ERROR: Bootstrap failed.") + return False # validate iniX accounts can be retrieved @@ -572,7 +585,7 @@ def validateAccounts(self, accounts, testSysAccounts=True): node.validateAccounts(myAccounts) - def createAccountAndVerify(self, account, creator, stakedDeposit=1000, stakeNet=100, stakeCPU=100, buyRAM=100): + def createAccountAndVerify(self, account, creator, stakedDeposit=1000, stakeNet=100, stakeCPU=100, buyRAM=10000): """create account, verify account and return transaction id""" assert(len(self.nodes) > 0) node=self.nodes[0] @@ -593,7 +606,7 @@ def createAccountAndVerify(self, account, creator, stakedDeposit=1000, stakeNet= # return transId # return None - def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, stakeNet=100, stakeCPU=100, buyRAM=100, exitOnError=False): + def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, 
waitForTransBlock=False, stakeNet=100, stakeCPU=100, buyRAM=10000, exitOnError=False): assert(len(self.nodes) > 0) node=self.nodes[0] trans=node.createInitializeAccount(account, creatorAccount, stakedDeposit, waitForTransBlock, stakeNet=stakeNet, stakeCPU=stakeCPU, buyRAM=buyRAM) @@ -683,6 +696,93 @@ def parseClusterKeys(totalNodes): return producerKeys + @staticmethod + def bios_bootstrap(totalNodes, biosHost, biosPort, dontKill=False): + """Bootstrap cluster using the bios_boot.sh script generated by eosio-launcher.""" + + Utils.Print("Starting cluster bootstrap.") + biosNode=Node(biosHost, biosPort) + if not biosNode.checkPulse(): + Utils.Print("ERROR: Bios node doesn't appear to be running...") + return None + + cmd="bash bios_boot.sh" + if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) + if 0 != subprocess.call(cmd.split(), stdout=Utils.FNull): + if not silent: Utils.Print("Launcher failed to shut down eos cluster.") + return None + + p = re.compile('error', re.IGNORECASE) + bootlog="eosio-ignition-wd/bootlog.txt" + with open(bootlog) as bootFile: + for line in bootFile: + if p.search(line): + Utils.Print("ERROR: bios_boot.sh script resulted in errors. 
See %s" % (bootlog)) + return None + + producerKeys=Cluster.parseClusterKeys(totalNodes) + # should have totalNodes node plus bios node + if producerKeys is None or len(producerKeys) < (totalNodes+1): + Utils.Print("ERROR: Failed to parse private keys from cluster config files.") + return None + + walletMgr=WalletMgr(True) + walletMgr.killall() + walletMgr.cleanup() + + if not walletMgr.launch(): + Utils.Print("ERROR: Failed to launch bootstrap wallet.") + return None + biosNode.setWalletEndpointArgs(walletMgr.walletEndpointArgs) + + try: + ignWallet=walletMgr.create("ignition") + if ignWallet is None: + Utils.Print("ERROR: Failed to create ignition wallet.") + return None + + eosioName="eosio" + eosioKeys=producerKeys[eosioName] + eosioAccount=Account(eosioName) + eosioAccount.ownerPrivateKey=eosioKeys["private"] + eosioAccount.ownerPublicKey=eosioKeys["public"] + eosioAccount.activePrivateKey=eosioKeys["private"] + eosioAccount.activePublicKey=eosioKeys["public"] + + if not walletMgr.importKey(eosioAccount, ignWallet): + Utils.Print("ERROR: Failed to import %s account keys into ignition wallet." % (eosioName)) + return None + + initialFunds="1000000.0000 {0}".format(CORE_SYMBOL) + Utils.Print("Transfer initial fund %s to individual accounts." % (initialFunds)) + trans=None + contract="eosio.token" + action="transfer" + for name, keys in producerKeys.items(): + data="{\"from\":\"eosio\",\"to\":\"%s\",\"quantity\":\"%s\",\"memo\":\"%s\"}" % (name, initialFunds, "init transfer") + opts="--permission eosio@active" + if name != "eosio": + trans=biosNode.pushMessage(contract, action, data, opts) + if trans is None or not trans[0]: + Utils.Print("ERROR: Failed to transfer funds from eosio.token to %s." 
% (name)) + return None + + Node.validateTransaction(trans[1]) + + Utils.Print("Wait for last transfer transaction to become finalized.") + transId=Node.getTransId(trans[1]) + if not biosNode.waitForTransInBlock(transId): + Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) + return None + + Utils.Print("Cluster bootstrap done.") + finally: + if not dontKill: + walletMgr.killall() + walletMgr.cleanup() + + return biosNode + @staticmethod def bootstrap(totalNodes, prodCount, biosHost, biosPort, dontKill=False, onlyBios=False): """Create 'prodCount' init accounts and deposits 10000000000 SYS in each. If prodCount is -1 will initialize all possible producers. @@ -1060,6 +1160,32 @@ def killall(self, silent=True, allInstances=False): except OSError as _: pass + def bounce(self, nodes, silent=True): + """Bounces nodeos instances as indicated by parameter nodes. + nodes should take the form of a comma-separated list as accepted by the launcher --bounce command (e.g. '00' or '00,01')""" + cmdArr = Cluster.__LauncherCmdArr.copy() + cmdArr.append("--bounce") + cmdArr.append(nodes) + cmd=" ".join(cmdArr) + if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) + if 0 != subprocess.call(cmdArr): + if not silent: Utils.Print("Launcher failed to bounce nodes: %s." % (nodes)) + return False + return True + + def down(self, nodes, silent=True): + """Brings down nodeos instances as indicated by parameter nodes. + nodes should take the form of a comma-separated list as accepted by the launcher --bounce command (e.g. '00' or '00,01')""" + cmdArr = Cluster.__LauncherCmdArr.copy() + cmdArr.append("--down") + cmdArr.append(nodes) + cmd=" ".join(cmdArr) + if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) + if 0 != subprocess.call(cmdArr): + if not silent: Utils.Print("Launcher failed to take down nodes: %s." 
% (nodes)) + return False + return True + def isMongodDbRunning(self): cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs) subcommand="db.version()" diff --git a/tests/Node.py b/tests/Node.py index a66b5e03fa3..39de900aa71 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -411,7 +411,7 @@ def isTransFinalized(self, transId): # Create & initialize account and return creation transactions. Return transaction json object - def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, stakeNet=100, stakeCPU=100, buyRAM=100, exitOnError=False): + def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, stakeNet=100, stakeCPU=100, buyRAM=10000, exitOnError=False): cmdDesc="system newaccount" cmd='%s -j %s %s %s %s --stake-net "%s %s" --stake-cpu "%s %s" --buy-ram "%s %s"' % ( cmdDesc, creatorAccount.name, account.name, account.ownerPublicKey, diff --git a/tests/launcher_test.py b/tests/launcher_test.py new file mode 100755 index 00000000000..8581c23d301 --- /dev/null +++ b/tests/launcher_test.py @@ -0,0 +1,249 @@ +#!/usr/bin/env python3 + +from testUtils import Utils +from Cluster import Cluster +from WalletMgr import WalletMgr +from Node import Node +from TestHelper import TestHelper + +import decimal +import re + +############################################################### +# nodeos_run_test +# --dump-error-details +# --keep-logs +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit +cmdError=Utils.cmdError +from core_symbol import CORE_SYMBOL + +args = TestHelper.parse_args({"--defproducera_prvt_key","--dump-error-details","--dont-launch","--keep-logs", + "-v","--leave-running","--clean-run","--p2p-plugin"}) +debug=args.v +defproduceraPrvtKey=args.defproducera_prvt_key +dumpErrorDetails=args.dump_error_details +keepLogs=args.keep_logs +dontLaunch=args.dont_launch +dontKill=args.leave_running +killAll=args.clean_run 
+p2pPlugin=args.p2p_plugin + +Utils.Debug=debug +cluster=Cluster(walletd=True, defproduceraPrvtKey=defproduceraPrvtKey) +walletMgr=WalletMgr(True) +testSuccessful=False +killEosInstances=not dontKill +killWallet=not dontKill + +WalletdName="keosd" +ClientName="cleos" +timeout = .5 * 12 * 2 + 60 # time for finalization with 1 producer + 60 seconds padding +Utils.setIrreversibleTimeout(timeout) + +try: + TestHelper.printSystemInfo("BEGIN") + + walletMgr.killall(allInstances=killAll) + walletMgr.cleanup() + + if not dontLaunch: + cluster.killall(allInstances=killAll) + cluster.cleanup() + Print("Stand up cluster") + if cluster.launch(pnodes=4, dontKill=dontKill, p2pPlugin=p2pPlugin) is False: + cmdError("launcher") + errorExit("Failed to stand up eos cluster.") + else: + cluster.initializeNodes(defproduceraPrvtKey=defproduceraPrvtKey) + killEosInstances=False + + Print("Validating system accounts after bootstrap") + cluster.validateAccounts(None) + + accounts=Cluster.createAccountKeys(3) + if accounts is None: + errorExit("FAILURE - create keys") + testeraAccount=accounts[0] + testeraAccount.name="testera11111" + currencyAccount=accounts[1] + currencyAccount.name="currency1111" + exchangeAccount=accounts[2] + exchangeAccount.name="exchange1111" + + PRV_KEY1=testeraAccount.ownerPrivateKey + PUB_KEY1=testeraAccount.ownerPublicKey + PRV_KEY2=currencyAccount.ownerPrivateKey + PUB_KEY2=currencyAccount.ownerPublicKey + PRV_KEY3=exchangeAccount.activePrivateKey + PUB_KEY3=exchangeAccount.activePublicKey + + testeraAccount.activePrivateKey=currencyAccount.activePrivateKey=PRV_KEY3 + testeraAccount.activePublicKey=currencyAccount.activePublicKey=PUB_KEY3 + + exchangeAccount.ownerPrivateKey=PRV_KEY2 + exchangeAccount.ownerPublicKey=PUB_KEY2 + + Print("Stand up %s" % (WalletdName)) + walletMgr.killall(allInstances=killAll) + walletMgr.cleanup() + if walletMgr.launch() is False: + cmdError("%s" % (WalletdName)) + errorExit("Failed to stand up eos walletd.") + + 
testWalletName="test" + Print("Creating wallet \"%s\"." % (testWalletName)) + testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,cluster.defproduceraAccount]) + + Print("Wallet \"%s\" password=%s." % (testWalletName, testWallet.password.encode("utf-8"))) + + for account in accounts: + Print("Importing keys for account %s into wallet %s." % (account.name, testWallet.name)) + if not walletMgr.importKey(account, testWallet): + cmdError("%s wallet import" % (ClientName)) + errorExit("Failed to import key for account %s" % (account.name)) + + defproduceraWalletName="defproducera" + Print("Creating wallet \"%s\"." % (defproduceraWalletName)) + defproduceraWallet=walletMgr.create(defproduceraWalletName) + + Print("Wallet \"%s\" password=%s." % (defproduceraWalletName, defproduceraWallet.password.encode("utf-8"))) + + defproduceraAccount=cluster.defproduceraAccount + + Print("Importing keys for account %s into wallet %s." % (defproduceraAccount.name, defproduceraWallet.name)) + if not walletMgr.importKey(defproduceraAccount, defproduceraWallet): + cmdError("%s wallet import" % (ClientName)) + errorExit("Failed to import key for account %s" % (defproduceraAccount.name)) + + node=cluster.getNode(0) + + Print("Validating accounts before user accounts creation") + cluster.validateAccounts(None) + + # create accounts via eosio as otherwise a bid is needed + Print("Create new account %s via %s" % (testeraAccount.name, cluster.eosioAccount.name)) + transId=node.createInitializeAccount(testeraAccount, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, exitOnError=True) + + Print("Create new account %s via %s" % (currencyAccount.name, cluster.eosioAccount.name)) + transId=node.createInitializeAccount(currencyAccount, cluster.eosioAccount, buyRAM=1000000, stakedDeposit=5000, exitOnError=True) + + Print("Create new account %s via %s" % (exchangeAccount.name, cluster.eosioAccount.name)) + transId=node.createInitializeAccount(exchangeAccount, 
cluster.eosioAccount, buyRAM=1000000, waitForTransBlock=True, exitOnError=True) + + Print("Validating accounts after user accounts creation") + accounts=[testeraAccount, currencyAccount, exchangeAccount] + cluster.validateAccounts(accounts) + + Print("Verify account %s" % (testeraAccount)) + if not node.verifyAccount(testeraAccount): + errorExit("FAILURE - account creation failed.", raw=True) + + transferAmount="97.5321 {0}".format(CORE_SYMBOL) + Print("Transfer funds %s from account %s to %s" % (transferAmount, defproduceraAccount.name, testeraAccount.name)) + node.transferFunds(defproduceraAccount, testeraAccount, transferAmount, "test transfer") + + expectedAmount=transferAmount + Print("Verify transfer, Expected: %s" % (expectedAmount)) + actualAmount=node.getAccountEosBalanceStr(testeraAccount.name) + if expectedAmount != actualAmount: + cmdError("FAILURE - transfer failed") + errorExit("Transfer verification failed. Excepted %s, actual: %s" % (expectedAmount, actualAmount)) + + transferAmount="0.0100 {0}".format(CORE_SYMBOL) + Print("Force transfer funds %s from account %s to %s" % ( + transferAmount, defproduceraAccount.name, testeraAccount.name)) + node.transferFunds(defproduceraAccount, testeraAccount, transferAmount, "test transfer", force=True) + + expectedAmount="97.5421 {0}".format(CORE_SYMBOL) + Print("Verify transfer, Expected: %s" % (expectedAmount)) + actualAmount=node.getAccountEosBalanceStr(testeraAccount.name) + if expectedAmount != actualAmount: + cmdError("FAILURE - transfer failed") + errorExit("Transfer verification failed. 
Excepted %s, actual: %s" % (expectedAmount, actualAmount)) + + Print("Validating accounts after some user transactions") + accounts=[testeraAccount, currencyAccount, exchangeAccount] + cluster.validateAccounts(accounts) + + transferAmount="97.5311 {0}".format(CORE_SYMBOL) + Print("Transfer funds %s from account %s to %s" % ( + transferAmount, testeraAccount.name, currencyAccount.name)) + trans=node.transferFunds(testeraAccount, currencyAccount, transferAmount, "test transfer a->b") + transId=Node.getTransId(trans) + + expectedAmount="98.0311 {0}".format(CORE_SYMBOL) # 5000 initial deposit + Print("Verify transfer, Expected: %s" % (expectedAmount)) + actualAmount=node.getAccountEosBalanceStr(currencyAccount.name) + if expectedAmount != actualAmount: + cmdError("FAILURE - transfer failed") + errorExit("Transfer verification failed. Excepted %s, actual: %s" % (expectedAmount, actualAmount)) + + Print("Validate last action for account %s" % (testeraAccount.name)) + actions=node.getActions(testeraAccount, -1, -1, exitOnError=True) + try: + assert(actions["actions"][0]["action_trace"]["act"]["name"] == "transfer") + except (AssertionError, TypeError, KeyError) as _: + Print("Action validation failed. Actions: %s" % (actions)) + raise + + node.waitForTransInBlock(transId) + + transaction=node.getTransaction(transId, exitOnError=True, delayedRetry=False) + + typeVal=None + amountVal=None + key="" + try: + key="[traces][0][act][name]" + typeVal= transaction["traces"][0]["act"]["name"] + key="[traces][0][act][data][quantity]" + amountVal=transaction["traces"][0]["act"]["data"]["quantity"] + amountVal=int(decimal.Decimal(amountVal.split()[0])*10000) + except (TypeError, KeyError) as e: + Print("transaction%s not found. 
Transaction: %s" % (key, transaction)) + raise + + if typeVal != "transfer" or amountVal != 975311: + errorExit("FAILURE - get transaction trans_id failed: %s %s %s" % (transId, typeVal, amountVal), raw=True) + + Print("Bouncing nodes #00 and #01") + if cluster.bounce("00,01") is False: + cmdError("launcher bounce") + errorExit("Failed to bounce eos node.") + + Print("Taking down node #02") + if cluster.down("02") is False: + cmdError("launcher down command") + errorExit("Failed to take down eos node.") + + Print("Using bounce option to re-launch node #02") + if cluster.bounce("02") is False: + cmdError("launcher bounce") + errorExit("Failed to bounce eos node.") + + p = re.compile('Assert') + errFileName="var/lib/node_00/stderr.txt" + assertionsFound=False + with open(errFileName) as errFile: + for line in errFile: + if p.search(line): + assertionsFound=True + + if assertionsFound: + # Too many assertion logs, hard to validate how many are genuine. Make this a warning + # for now, hopefully the logs will get cleaned up in future. 
+ Print("WARNING: Asserts in var/lib/node_00/stderr.txt") + #errorExit("FAILURE - Assert in var/lib/node_00/stderr.txt") + + Print("Validating accounts at end of test") + accounts=[testeraAccount, currencyAccount, exchangeAccount] + cluster.validateAccounts(accounts) + + testSuccessful=True +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + +exit(0) diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py index 85be1bd940b..47c745e8aba 100755 --- a/tests/nodeos_run_test.py +++ b/tests/nodeos_run_test.py @@ -206,10 +206,10 @@ transId=node.createInitializeAccount(testeraAccount, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, exitOnError=True) Print("Create new account %s via %s" % (currencyAccount.name, cluster.eosioAccount.name)) - transId=node.createInitializeAccount(currencyAccount, cluster.eosioAccount, stakedDeposit=5000, exitOnError=True) + transId=node.createInitializeAccount(currencyAccount, cluster.eosioAccount, buyRAM=1000000, stakedDeposit=5000, exitOnError=True) Print("Create new account %s via %s" % (exchangeAccount.name, cluster.eosioAccount.name)) - transId=node.createInitializeAccount(exchangeAccount, cluster.eosioAccount, waitForTransBlock=True, exitOnError=True) + transId=node.createInitializeAccount(exchangeAccount, cluster.eosioAccount, buyRAM=1000000, waitForTransBlock=True, exitOnError=True) Print("Validating accounts after user accounts creation") accounts=[testeraAccount, currencyAccount, exchangeAccount] diff --git a/tests/nodeos_under_min_avail_ram.py b/tests/nodeos_under_min_avail_ram.py index 2fe356f68b5..016dfbceb8c 100755 --- a/tests/nodeos_under_min_avail_ram.py +++ b/tests/nodeos_under_min_avail_ram.py @@ -81,7 +81,7 @@ def setName(self, num): maxRAMFlag="--chain-state-db-size-mb" maxRAMValue=1010 extraNodeosArgs=" %s %d %s %d " % (minRAMFlag, minRAMValue, maxRAMFlag, maxRAMValue) - if cluster.launch(onlyBios=False, 
dontKill=dontKill, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes, extraNodeosArgs=extraNodeosArgs) is False: + if cluster.launch(onlyBios=False, dontKill=dontKill, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes, extraNodeosArgs=extraNodeosArgs, useBiosBootFile=False) is False: Utils.cmdError("launcher") errorExit("Failed to stand up eos cluster.") diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py index df9bbb689f4..dac5f8dd4a5 100755 --- a/tests/nodeos_voting_test.py +++ b/tests/nodeos_voting_test.py @@ -209,7 +209,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): cluster.killall(allInstances=killAll) cluster.cleanup() Print("Stand up cluster") - if cluster.launch(prodCount=prodCount, onlyBios=False, dontKill=dontKill, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21, p2pPlugin=p2pPlugin) is False: + if cluster.launch(prodCount=prodCount, onlyBios=False, dontKill=dontKill, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21, p2pPlugin=p2pPlugin, useBiosBootFile=False) is False: Utils.cmdError("launcher") errorExit("Failed to stand up eos cluster.") From fc1169d99698e14ad550c2c0a5d7db54df970cf3 Mon Sep 17 00:00:00 2001 From: Paul Calabrese Date: Wed, 29 Aug 2018 09:31:17 -0500 Subject: [PATCH 023/194] Enable new launcher test --- tests/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index e70ce002336..7cee9ae4cc5 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -60,6 +60,7 @@ add_test(NAME restart-scenarios-test-hard_replay COMMAND tests/restart-scenarios add_test(NAME restart-scenarios-test-none COMMAND tests/restart-scenarios-test.py -c none --kill-sig term -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) # TODO: add_test(NAME consensus-validation-malicious-producers COMMAND tests/consensus-validation-malicious-producers.py -w 80 
--dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME validate_dirty_db_test COMMAND tests/validate-dirty-db.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME launcher_test COMMAND tests/launcher_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) # Long running tests add_test(NAME nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) From ecb446bbbdbca1e2b0ad3009c63667c63d3cd4ea Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 29 Aug 2018 09:40:02 -0500 Subject: [PATCH 024/194] Clean up from peer review comments --- unittests/api_tests.cpp | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 9ac15cc4473..f2524022254 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -448,21 +448,16 @@ BOOST_FIXTURE_TEST_CASE(require_notice_tests, TESTER) { try { produce_blocks(1); // test require_notice - auto scope = std::vector{N(testapi)}; - auto test_require_notice = [this](auto& test, std::vector& scope){ - signed_transaction trx; - auto tm = test_api_action{}; - - action act(std::vector{{N(testapi), config::active_name}}, tm); - trx.actions.push_back(act); + signed_transaction trx; + auto tm = test_api_action{}; - test.set_transaction_headers(trx); - trx.sign(test.get_private_key(N(testapi), "active"), control->get_chain_id()); - auto res = test.push_transaction(trx); - BOOST_CHECK_EQUAL(res->receipt->status, transaction_receipt::executed); - }; + action act( std::vector{{N( testapi ), config::active_name}}, tm ); + trx.actions.push_back( act ); - test_require_notice(*this, scope); // no exception expected + set_transaction_headers( trx ); + trx.sign( get_private_key( N( testapi ), "active" ), control->get_chain_id() ); + auto res = push_transaction( trx ); + BOOST_CHECK_EQUAL( 
res->receipt->status, transaction_receipt::executed ); } FC_LOG_AND_RETHROW() } From 86b5b8c06cde310f631e206cb272d9c6319f51b5 Mon Sep 17 00:00:00 2001 From: Marlon Williams Date: Wed, 29 Aug 2018 10:54:46 -0700 Subject: [PATCH 025/194] Update chain_plugin.hpp Making ``core_symbol`` and ``core_symbol_precision`` optional --- .../chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 891f63b4f70..373c0aacebb 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -97,8 +97,8 @@ class read_only { //string recent_slots; //double participation_rate = 0; optional server_version_string; - string core_symbol; - uint64_t core_symbol_precision; + optional core_symbol; + optional core_symbol_precision; }; get_info_results get_info(const get_info_params&) const; From c1a1ff0582a20c1e7ae91437c7eadcaf61a8972a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 29 Aug 2018 13:12:49 -0500 Subject: [PATCH 026/194] Remove old exchange contract --- contracts/CMakeLists.txt | 1 - contracts/eosio.system/exchange_state.cpp | 2 +- contracts/exchange/CMakeLists.txt | 12 - .../Pegged Derivative Currency Design.md | 120 ---- contracts/exchange/exchange.abi | 166 ------ contracts/exchange/exchange.cpp | 250 --------- contracts/exchange/exchange.hpp | 87 --- contracts/exchange/exchange_accounts.cpp | 27 - contracts/exchange/exchange_accounts.hpp | 43 -- contracts/exchange/exchange_state.cpp | 87 --- contracts/exchange/exchange_state.hpp | 87 --- contracts/exchange/market_state.cpp | 223 -------- contracts/exchange/market_state.hpp | 77 --- contracts/exchange/test_exchange.cpp | 518 ------------------ tests/CMakeLists.txt | 2 +- tests/nodeos_run_test.py | 14 +- unittests/CMakeLists.txt | 2 
+- unittests/exchange_tests.cpp | 358 ------------ unittests/multisig_tests.cpp | 5 +- 19 files changed, 5 insertions(+), 2076 deletions(-) delete mode 100644 contracts/exchange/CMakeLists.txt delete mode 100644 contracts/exchange/Pegged Derivative Currency Design.md delete mode 100644 contracts/exchange/exchange.abi delete mode 100644 contracts/exchange/exchange.cpp delete mode 100644 contracts/exchange/exchange.hpp delete mode 100644 contracts/exchange/exchange_accounts.cpp delete mode 100644 contracts/exchange/exchange_accounts.hpp delete mode 100644 contracts/exchange/exchange_state.cpp delete mode 100644 contracts/exchange/exchange_state.hpp delete mode 100644 contracts/exchange/market_state.cpp delete mode 100644 contracts/exchange/market_state.hpp delete mode 100644 contracts/exchange/test_exchange.cpp delete mode 100644 unittests/exchange_tests.cpp diff --git a/contracts/CMakeLists.txt b/contracts/CMakeLists.txt index 99a32aa53fa..c6eb0903d63 100644 --- a/contracts/CMakeLists.txt +++ b/contracts/CMakeLists.txt @@ -15,7 +15,6 @@ add_subdirectory(multi_index_test) add_subdirectory(eosio.system) add_subdirectory(identity) add_subdirectory(stltest) -add_subdirectory(exchange) add_subdirectory(test.inline) #add_subdirectory(bancor) diff --git a/contracts/eosio.system/exchange_state.cpp b/contracts/eosio.system/exchange_state.cpp index b621bdef902..621d3e714b3 100644 --- a/contracts/eosio.system/exchange_state.cpp +++ b/contracts/eosio.system/exchange_state.cpp @@ -1,4 +1,4 @@ -#include +#include namespace eosiosystem { asset exchange_state::convert_to_exchange( connector& c, asset in ) { diff --git a/contracts/exchange/CMakeLists.txt b/contracts/exchange/CMakeLists.txt deleted file mode 100644 index c855872ef5d..00000000000 --- a/contracts/exchange/CMakeLists.txt +++ /dev/null @@ -1,12 +0,0 @@ -file(GLOB ABI_FILES "*.abi") -add_wast_executable(TARGET exchange - INCLUDE_FOLDERS "${STANDARD_INCLUDE_FOLDERS}" - LIBRARIES libc++ libc eosiolib - DESTINATION_FOLDER 
${CMAKE_CURRENT_BINARY_DIR} -) -configure_file("${ABI_FILES}" "${CMAKE_CURRENT_BINARY_DIR}" COPYONLY) - -add_executable(test_exchange test_exchange.cpp ) -#bfp/lib/pack.c bfp/lib/posit.cpp bfp/lib/util.c bfp/lib/op2.c) -target_link_libraries( test_exchange fc ) -target_include_directories( test_exchange PUBLIC fixed_point/include ) diff --git a/contracts/exchange/Pegged Derivative Currency Design.md b/contracts/exchange/Pegged Derivative Currency Design.md deleted file mode 100644 index fc819ccd61f..00000000000 --- a/contracts/exchange/Pegged Derivative Currency Design.md +++ /dev/null @@ -1,120 +0,0 @@ -# Pegged Derivative Currency Design - -A currency is designed to be a fungible and non-callable asset. A pegged Derivative currency, such as BitUSD, is backed by a cryptocurrency held as collateral. The "issuer" is "short" the dollar and extra-long the cryptocurrency. The buyer is simply long the dollar. - - - -Background ----------- -BitShares created the first working pegged asset system by allowing anyone to take out a short position by posting collateral and issuing BitUSD at a minimum 1.5:1 collateral:debt ratio. The **least collateralized position** was forced to provide liquidity for BitUSD holders -any time the market price fell more than a couple percent below the dollar (if the BitUSD holder opted to use forced liquidation). - -To prevent abuse of the price feed, all forced liquidation was delayed. - -In the event of a "black swan" all shorts have their positions liquidated at the price feed and all holders of BitUSD are only promised a fixed redemption rate. - -There are several problems with this design: - -1. There is very **poor liquidity** in the BitUSD / BitShares market creating **large spreads** -2. The shorts take all the risk and only profit when the price of BitShares rises -3. Blackswans are perpetual and very disruptive. -4. It is "every short for themselves" -5. Due to the risk/reward ratio the supply can be limited -6. 
The **collateral requirements** limit opportunity for leverage. - -New Approach ------------- -We present a new approach to pegged assets where the short-positions cooperate to provide the -service of a pegged asset with **high liquidity**. They make money by encouraging people to trade -their pegged asset and earning income **from the trading fees rather than seeking heavy leverage** -in a speculative market. They also generate money by earning interest on personal short positions. - -The Setup Process ------------------ -An initial user deposits a Collateral Currency (C) into an smart contract and provides the initial -price feed. A new Debt token (D) is issued based upon the price feed and a 1.5:1 C:D ratio and the -issued tokens are deposited into the **Bancor market maker**. At this point in time there is 0 leverage by -the market maker because no D have been sold. The initial user is also issued exchange tokens (E) in the -market maker. - -At this point people can buy E or D and the Bancor algorithm will provide liquidity between C, E, and D. Due to -the fees charged by the the market maker the value of E will increase in terms of C. - -> Collateral currency = Smart Token/reserve of parent currency -> -> Issued tokens = Bounty Tokens (distributed to early holders / community supporters) -> -> Collateral Ratio (C:D) = reciprocal of Loan-to-Value Ratio (LTV) - -Maintaining the Peg -------------------- -To maximize the utility of the D token, the market maker needs to maintain a **narrow trading range** of D vs the Dollar. -The more **consistant and reliable this trading range** is, the more people (arbitrageur) will be willing to hold and trade D. There are several -situations that can occur: - -1. D is trading above a dollar +5% - - a. Maker is fully collateralized `C:D>1.5` - - - issue new D and deposit into maker such that collateral ratio is 1.5:1 - b. 
Maker is not fully collateralized `C:D<1.5` - - - adjust the maker weights to lower the redemption prices (defending capital of maker), arbitrageur will probably prevent this reality. - - > Marker Weights = Connector Weights (in Bancor) - > - > Redemption Price: The price at which a bond may be repurchased by the issuer before maturity - -2. D is selling for less than a dollar -5% - - a. Maker is fully collateralized `C:D>1.5` - - - adjust the maker weights to increase redemption prices - b. Maker is under collateralized `C:D<1.5` - ``` - - stop E -> C and E -> D trades. - - offer bonus on C->E and D->E trades. - - on D->E conversions take received D out of circulation rather than add to the market maker - - on C<->D conversion continue as normal - - stop attempting adjusting maker ratio to defend the price feed and let the price rise until above +1% - ``` - -Value of E = C - D where D == all in circulation, so E->C conversions should always assume all outstanding D was **settled at current maker price**. The result of such a conversion will **raise the collateral ratio**, unless they are forced to buy and retire some D at the current ratio. The algorithm must ensure the individual selling E doesn't leave those holding E worse-off from a D/E perspective (doesnot reduce D to a large extent). An individual buying E will create new D to maintain the same D/E ratio. - -This implies that when value of all outstanding D is greater than all C that E cannot be sold until the network -generates **enough in trading fees** to recaptialize the market. This is like a company with more debt than equity not allowing buybacks. In fact, **E should not be sellable any time the collateral ratio falls below 1.5:1**. - -BitShares is typical **margin call** territory, but holders of E have a chance at future liquidity if the situation improves. 
While E is not sellable, -E can be purchased at a 10% discount to its theoretical value, this will dilute existing holders of E but will raise capital and hopefully move E holders closer to eventual liquidity. - - -Adjusting Bancor Ratios by Price Feed -------------------------------------- -The price feed informs the algorithm of significant deviations between the Bancor effective price and the target peg. The price feed is necessarily a lagging indicator and may also factor in natural spreads between different exchanges. Therefore, the price feed shall have no impact unless there is a significant deviation (5%). When such a deviation occurs, the ratio is automatically adjusted to 4%. - -In other words, the price feed keeps the maker in the "channel" but does not attempt to set the real-time prices. If there is a sudden change and the price feed differs from maker by 50% then after the adjustment it will still differ by 4%. - -> Effective Price = Connected Tokens exchanged / Smart Tokens exchanged - -Summary -------- -Under this model holders of E are short the dollar and make money to recollateralize their positions via market activity. -Anyone selling E must **realize the losses as a result of being short**. -Anyone buying E can get in to take their place at the current collateral ratio. - -The value of E is equal to the value of a **margin postion**. -Anyone can buy E for a combination C and D equal to the current collateral ratio. - -Anyone may sell E for a personal margin position with equal ratio of C and D. -Anyone may buy E with a personal margin position. - -If they only have C, then they must use some of C to buy D first (which will move the price). -If they only have D, then they must use some of D to buy C first (which will also move the price). - -Anyone can buy and sell E based upon Bancor balances of C and (all D), they must sell their E for a combination of D and C at current ratio, then sell the C or D for the other. 
- - -Anytime collateral level falls below 1.5 selling E is blocked and buying of E is given a 10% bonus. -Anyone can convert D<->C using Bancor maker configured to maintain price within +/- 5% of the price feed. - - diff --git a/contracts/exchange/exchange.abi b/contracts/exchange/exchange.abi deleted file mode 100644 index b4cde189371..00000000000 --- a/contracts/exchange/exchange.abi +++ /dev/null @@ -1,166 +0,0 @@ -{ - "version": "eosio::abi/1.0", - "types": [{ - "new_type_name": "account_name", - "type": "name" - } - ], - "structs": [ - { - "name": "extended_symbol", - "base": "", - "fields": [ - {"name":"sym", "type":"symbol"}, - {"name":"contract", "type":"account_name"} - ] - }, - { - "name": "extended_asset", - "base": "", - "fields": [ - {"name":"quantity", "type":"asset"}, - {"name":"contract", "type":"account_name"} - ] - }, - { - "name": "upmargin", - "base": "", - "fields": [ - {"name":"borrower", "type":"account_name"}, - {"name":"market", "type":"symbol"}, - {"name":"delta_borrow", "type":"extended_asset"}, - {"name":"delta_collateral", "type":"extended_asset"} - ] - }, - { - "name": "covermargin", - "base": "", - "fields": [ - {"name":"borrower", "type":"account_name"}, - {"name":"market", "type":"symbol"}, - {"name":"cover_amount", "type":"extended_asset"} - ] - }, - { - "name": "lend", - "base": "", - "fields": [ - {"name":"lender", "type":"account_name"}, - {"name":"market", "type":"symbol"}, - {"name":"quantity", "type":"extended_asset"} - ] - }, - { - "name": "unlend", - "base": "", - "fields": [ - {"name":"lender", "type":"account_name"}, - {"name":"market", "type":"symbol"}, - {"name":"interest_shares", "type":"float64"}, - {"name":"interest_symbol", "type":"extended_symbol"} - ] - }, - { - "name": "trade", - "base": "", - "fields": [ - {"name":"seller", "type":"account_name"}, - {"name":"market", "type":"symbol"}, - {"name":"sell", "type":"extended_asset"}, - {"name":"min_receive", "type":"extended_asset"}, - {"name":"expire", 
"type":"uint32"}, - {"name":"fill_or_kill", "type":"uint8"} - ] - }, - { - "name": "createx", - "base": "", - "fields": [ - {"name":"creator", "type":"account_name"}, - {"name":"initial_supply", "type":"asset"}, - {"name":"fee", "type":"uint32"}, - {"name":"base_deposit", "type":"extended_asset"}, - {"name":"quote_deposit", "type":"extended_asset"} - ] - }, - { - "name": "transfer", - "base": "", - "fields": [ - {"name":"from", "type":"account_name"}, - {"name":"to", "type":"account_name"}, - {"name":"quantity", "type":"asset"}, - {"name":"memo", "type":"string"} - ] - }, - { - "name": "deposit", - "base": "", - "fields": [ - {"name":"from", "type":"account_name"}, - {"name":"quantity", "type":"extended_asset"} - ] - }, - { - "name": "create", - "base": "", - "fields": [ - {"name":"issuer", "type":"account_name"}, - {"name":"maximum_supply", "type":"asset"}, - {"name":"can_freeze", "type":"uint8"}, - {"name":"can_recall", "type":"uint8"}, - {"name":"can_whitelist", "type":"uint8"} - ] - },{ - "name": "issue", - "base": "", - "fields": [ - {"name":"to", "type":"account_name"}, - {"name":"quantity", "type":"asset"}, - {"name":"memo", "type":"string"} - ] - },{ - "name": "account", - "base": "", - "fields": [ - {"name":"currency", "type":"uint64"}, - {"name":"balance", "type":"uint64"} - ] - },{ - "name": "currency_stats", - "base": "", - "fields": [ - {"name":"currency", "type":"uint64"}, - {"name":"supply", "type":"uint64"} - ] - } - ], - "actions": [ - { "name": "deposit", "type": "deposit", "ricardian_contract": "" }, - { "name": "transfer", "type": "transfer", "ricardian_contract": "" }, - { "name": "trade", "type": "trade", "ricardian_contract": "" }, - { "name": "createx", "type": "createx", "ricardian_contract": "" }, - { "name": "issue", "type": "issue", "ricardian_contract": "" }, - { "name": "lend", "type": "lend", "ricardian_contract": "" }, - { "name": "unlend", "type": "unlend", "ricardian_contract": "" }, - { "name": "upmargin", "type": "upmargin", 
"ricardian_contract": "" }, - { "name": "covermargin", "type": "covermargin", "ricardian_contract": "" }, - { "name": "create", "type": "create", "ricardian_contract": "" } - ], - "tables": [{ - "name": "account", - "type": "account", - "index_type": "i64", - "key_names" : ["currency"], - "key_types" : ["uint64"] - },{ - "name": "stat", - "type": "currency_stats", - "index_type": "i64", - "key_names" : ["currency"], - "key_types" : ["uint64"] - } - ], - "ricardian_clauses": [], - "abi_extensions": [] -} diff --git a/contracts/exchange/exchange.cpp b/contracts/exchange/exchange.cpp deleted file mode 100644 index f90076cf3e1..00000000000 --- a/contracts/exchange/exchange.cpp +++ /dev/null @@ -1,250 +0,0 @@ -#include -#include "exchange.hpp" - -#include "exchange_state.cpp" -#include "exchange_accounts.cpp" -#include "market_state.cpp" - -#include - -namespace eosio { - - void exchange::deposit( account_name from, extended_asset quantity ) { - eosio_assert( quantity.is_valid(), "invalid quantity" ); - currency::inline_transfer( from, _this_contract, quantity, "deposit" ); - _accounts.adjust_balance( from, quantity, "deposit" ); - } - - void exchange::withdraw( account_name from, extended_asset quantity ) { - require_auth( from ); - eosio_assert( quantity.is_valid(), "invalid quantity" ); - eosio_assert( quantity.amount >= 0, "cannot withdraw negative balance" ); // Redundant? inline_transfer will fail if quantity is not positive. 
- _accounts.adjust_balance( from, -quantity ); - currency::inline_transfer( _this_contract, from, quantity, "withdraw" ); - } - - void exchange::on( const trade& t ) { - require_auth( t.seller ); - eosio_assert( t.sell.is_valid(), "invalid sell amount" ); - eosio_assert( t.sell.amount > 0, "sell amount must be positive" ); - eosio_assert( t.min_receive.is_valid(), "invalid min receive amount" ); - eosio_assert( t.min_receive.amount >= 0, "min receive amount cannot be negative" ); - - auto receive_symbol = t.min_receive.get_extended_symbol(); - eosio_assert( t.sell.get_extended_symbol() != receive_symbol, "invalid conversion" ); - - market_state market( _this_contract, t.market, _accounts ); - - auto temp = market.exstate; - auto output = temp.convert( t.sell, receive_symbol ); - - while( temp.requires_margin_call() ) { - market.margin_call( receive_symbol ); - temp = market.exstate; - output = temp.convert( t.sell, receive_symbol ); - } - market.exstate = temp; - - print( name{t.seller}, " ", t.sell, " => ", output, "\n" ); - - if( t.min_receive.amount != 0 ) { - eosio_assert( t.min_receive.amount <= output.amount, "unable to fill" ); - } - - _accounts.adjust_balance( t.seller, -t.sell, "sold" ); - _accounts.adjust_balance( t.seller, output, "received" ); - - if( market.exstate.supply.amount != market.initial_state().supply.amount ) { - auto delta = market.exstate.supply - market.initial_state().supply; - - _excurrencies.issue_currency( { .to = _this_contract, - .quantity = delta, - .memo = string("") } ); - } - - /// TODO: if pending order start deferred trx to fill it - - market.save(); - } - - - /** - * This action shall fail if it would result in a margin call - */ - void exchange::on( const upmargin& b ) { - require_auth( b.borrower ); - eosio_assert( b.delta_borrow.is_valid(), "invalid borrow delta" ); - eosio_assert( b.delta_collateral.is_valid(), "invalid collateral delta" ); - - market_state market( _this_contract, b.market, _accounts ); - - eosio_assert( 
b.delta_borrow.amount != 0 || b.delta_collateral.amount != 0, "no effect" ); - eosio_assert( b.delta_borrow.get_extended_symbol() != b.delta_collateral.get_extended_symbol(), "invalid args" ); - eosio_assert( market.exstate.base.balance.get_extended_symbol() == b.delta_borrow.get_extended_symbol() || - market.exstate.quote.balance.get_extended_symbol() == b.delta_borrow.get_extended_symbol(), - "invalid asset for market" ); - eosio_assert( market.exstate.base.balance.get_extended_symbol() == b.delta_collateral.get_extended_symbol() || - market.exstate.quote.balance.get_extended_symbol() == b.delta_collateral.get_extended_symbol(), - "invalid asset for market" ); - - market.update_margin( b.borrower, b.delta_borrow, b.delta_collateral ); - - /// if this succeeds then the borrower will see their balances adjusted accordingly, - /// if they don't have sufficient balance to either fund the collateral or pay off the - /// debt then this will fail before we go further. - _accounts.adjust_balance( b.borrower, b.delta_borrow, "borrowed" ); - _accounts.adjust_balance( b.borrower, -b.delta_collateral, "collateral" ); - - market.save(); - } - - void exchange::on( const covermargin& c ) { - require_auth( c.borrower ); - eosio_assert( c.cover_amount.is_valid(), "invalid cover amount" ); - eosio_assert( c.cover_amount.amount > 0, "cover amount must be positive" ); - - market_state market( _this_contract, c.market, _accounts ); - - market.cover_margin( c.borrower, c.cover_amount); - - market.save(); - } - - void exchange::createx( account_name creator, - asset initial_supply, - uint32_t /* fee */, - extended_asset base_deposit, - extended_asset quote_deposit - ) { - require_auth( creator ); - eosio_assert( initial_supply.is_valid(), "invalid initial supply" ); - eosio_assert( initial_supply.amount > 0, "initial supply must be positive" ); - eosio_assert( base_deposit.is_valid(), "invalid base deposit" ); - eosio_assert( base_deposit.amount > 0, "base deposit must be positive" ); 
- eosio_assert( quote_deposit.is_valid(), "invalid quote deposit" ); - eosio_assert( quote_deposit.amount > 0, "quote deposit must be positive" ); - eosio_assert( base_deposit.get_extended_symbol() != quote_deposit.get_extended_symbol(), - "must exchange between two different currencies" ); - - print( "base: ", base_deposit.get_extended_symbol() ); - print( "quote: ",quote_deposit.get_extended_symbol() ); - - auto exchange_symbol = initial_supply.symbol.name(); - print( "marketid: ", exchange_symbol, " \n " ); - - markets exstates( _this_contract, exchange_symbol ); - auto existing = exstates.find( exchange_symbol ); - - eosio_assert( existing == exstates.end(), "market already exists" ); - exstates.emplace( creator, [&]( auto& s ) { - s.manager = creator; - s.supply = extended_asset(initial_supply, _this_contract); - s.base.balance = base_deposit; - s.quote.balance = quote_deposit; - - s.base.peer_margin.total_lent.symbol = base_deposit.symbol; - s.base.peer_margin.total_lent.contract = base_deposit.contract; - s.base.peer_margin.total_lendable.symbol = base_deposit.symbol; - s.base.peer_margin.total_lendable.contract = base_deposit.contract; - - s.quote.peer_margin.total_lent.symbol = quote_deposit.symbol; - s.quote.peer_margin.total_lent.contract = quote_deposit.contract; - s.quote.peer_margin.total_lendable.symbol = quote_deposit.symbol; - s.quote.peer_margin.total_lendable.contract = quote_deposit.contract; - }); - - _excurrencies.create_currency( { .issuer = _this_contract, - // TODO: After currency contract respects maximum supply limits, the maximum supply here needs to be set appropriately. 
- .maximum_supply = asset( 0, initial_supply.symbol ), - .issuer_can_freeze = false, - .issuer_can_whitelist = false, - .issuer_can_recall = false } ); - - _excurrencies.issue_currency( { .to = _this_contract, - .quantity = initial_supply, - .memo = string("initial exchange tokens") } ); - - _accounts.adjust_balance( creator, extended_asset( initial_supply, _this_contract ), "new exchange issue" ); - _accounts.adjust_balance( creator, -base_deposit, "new exchange deposit" ); - _accounts.adjust_balance( creator, -quote_deposit, "new exchange deposit" ); - } - - void exchange::lend( account_name lender, symbol_type market, extended_asset quantity ) { - require_auth( lender ); - eosio_assert( quantity.is_valid(), "invalid quantity" ); - eosio_assert( quantity.amount > 0, "must lend a positive amount" ); - - market_state m( _this_contract, market, _accounts ); - m.lend( lender, quantity ); - m.save(); - } - - void exchange::unlend( account_name lender, symbol_type market, double interest_shares, extended_symbol interest_symbol ) { - require_auth( lender ); - eosio_assert( interest_shares > 0, "must unlend a positive amount" ); - - market_state m( _this_contract, market, _accounts ); - m.unlend( lender, interest_shares, interest_symbol ); - m.save(); - } - - - void exchange::on( const currency::transfer& t, account_name code ) { - if( code == _this_contract ) - _excurrencies.on( t ); - - if( t.to == _this_contract ) { - auto a = extended_asset(t.quantity, code); - eosio_assert( a.is_valid(), "invalid quantity in transfer" ); - eosio_assert( a.amount != 0, "zero quantity is disallowed in transfer"); - eosio_assert( a.amount > 0 || t.memo == "withdraw", "withdrew tokens without withdraw in memo"); - eosio_assert( a.amount < 0 || t.memo == "deposit", "received tokens without deposit in memo" ); - _accounts.adjust_balance( t.from, a, t.memo ); - } - } - - - #define N(X) ::eosio::string_to_name(#X) - - void exchange::apply( account_name contract, account_name act ) { - - if( 
act == N(transfer) ) { - on( unpack_action_data(), contract ); - return; - } - - if( contract != _this_contract ) - return; - - auto& thiscontract = *this; - switch( act ) { - EOSIO_API( exchange, (createx)(deposit)(withdraw)(lend)(unlend) ) - }; - - switch( act ) { - case N(trade): - on( unpack_action_data() ); - return; - case N(upmargin): - on( unpack_action_data() ); - return; - case N(covermargin): - on( unpack_action_data() ); - return; - default: - _excurrencies.apply( contract, act ); - return; - } - } - -} /// namespace eosio - - - -extern "C" { - [[noreturn]] void apply( uint64_t receiver, uint64_t code, uint64_t action ) { - eosio::exchange ex( receiver ); - ex.apply( code, action ); - eosio_exit(0); - } -} diff --git a/contracts/exchange/exchange.hpp b/contracts/exchange/exchange.hpp deleted file mode 100644 index 9ee3139e0b0..00000000000 --- a/contracts/exchange/exchange.hpp +++ /dev/null @@ -1,87 +0,0 @@ -#include -#include -#include -#include -#include - -namespace eosio { - - /** - * This contract enables users to create an exchange between any pair of - * standard currency types. A new exchange is created by funding it with - * an equal value of both sides of the order book and giving the issuer - * the initial shares in that orderbook. - * - * To prevent excessive rounding errors, the initial deposit should include - * a sizeable quantity of both the base and quote currencies and the exchange - * shares should have a quantity 100x the quantity of the largest initial - * deposit. - * - * Users must deposit funds into the exchange before they can trade on the - * exchange. - * - * Each time an exchange is created a new currency for that exchanges market - * maker is also created. This currencies supply and symbol must be unique and - * it uses the currency contract's tables to manage it. 
- */ - class exchange { - private: - account_name _this_contract; - currency _excurrencies; - exchange_accounts _accounts; - - public: - exchange( account_name self ) - :_this_contract(self), - _excurrencies(self), - _accounts(self) - {} - - void createx( account_name creator, - asset initial_supply, - uint32_t fee, - extended_asset base_deposit, - extended_asset quote_deposit - ); - - void deposit( account_name from, extended_asset quantity ); - void withdraw( account_name from, extended_asset quantity ); - void lend( account_name lender, symbol_type market, extended_asset quantity ); - - void unlend( - account_name lender, - symbol_type market, - double interest_shares, - extended_symbol interest_symbol - ); - - struct covermargin { - account_name borrower; - symbol_type market; - extended_asset cover_amount; - }; - - struct upmargin { - account_name borrower; - symbol_type market; - extended_asset delta_borrow; - extended_asset delta_collateral; - }; - - struct trade { - account_name seller; - symbol_type market; - extended_asset sell; - extended_asset min_receive; - uint32_t expire = 0; - uint8_t fill_or_kill = true; - }; - - void on( const trade& t ); - void on( const upmargin& b ); - void on( const covermargin& b ); - void on( const currency::transfer& t, account_name code ); - - void apply( account_name contract, account_name act ); - }; -} // namespace eosio diff --git a/contracts/exchange/exchange_accounts.cpp b/contracts/exchange/exchange_accounts.cpp deleted file mode 100644 index 249b56c3e66..00000000000 --- a/contracts/exchange/exchange_accounts.cpp +++ /dev/null @@ -1,27 +0,0 @@ -#include - -namespace eosio { - - void exchange_accounts::adjust_balance( account_name owner, extended_asset delta, const string& reason ) { - (void)reason; - - auto table = exaccounts_cache.find( owner ); - if( table == exaccounts_cache.end() ) { - table = exaccounts_cache.emplace( owner, exaccounts(_this_contract, owner ) ).first; - } - auto useraccounts = 
table->second.find( owner ); - if( useraccounts == table->second.end() ) { - table->second.emplace( owner, [&]( auto& exa ){ - exa.owner = owner; - exa.balances[delta.get_extended_symbol()] = delta.amount; - eosio_assert( delta.amount >= 0, "overdrawn balance 1" ); - }); - } else { - table->second.modify( useraccounts, 0, [&]( auto& exa ) { - const auto& b = exa.balances[delta.get_extended_symbol()] += delta.amount; - eosio_assert( b >= 0, "overdrawn balance 2" ); - }); - } - } - -} /// namespace eosio diff --git a/contracts/exchange/exchange_accounts.hpp b/contracts/exchange/exchange_accounts.hpp deleted file mode 100644 index 2ec6027c5ec..00000000000 --- a/contracts/exchange/exchange_accounts.hpp +++ /dev/null @@ -1,43 +0,0 @@ -#pragma once -#include -#include - -namespace eosio { - - using boost::container::flat_map; - - /** - * Each user has their own account with the exchange contract that keeps track - * of how much a user has on deposit for each extended asset type. The assumption - * is that storing a single flat map of all balances for a particular user will - * be more practical than breaking this down into a multi-index table sorted by - * the extended_symbol. - */ - struct exaccount { - account_name owner; - flat_map balances; - - uint64_t primary_key() const { return owner; } - EOSLIB_SERIALIZE( exaccount, (owner)(balances) ) - }; - - typedef eosio::multi_index exaccounts; - - - /** - * Provides an abstracted interface around storing balances for users. This class - * caches tables to make multiple accesses effecient. 
- */ - struct exchange_accounts { - exchange_accounts( account_name code ):_this_contract(code){} - - void adjust_balance( account_name owner, extended_asset delta, const string& reason = string() ); - - private: - account_name _this_contract; - /** - * Keep a cache of all accounts tables we access - */ - flat_map exaccounts_cache; - }; -} /// namespace eosio diff --git a/contracts/exchange/exchange_state.cpp b/contracts/exchange/exchange_state.cpp deleted file mode 100644 index 7f40fae9641..00000000000 --- a/contracts/exchange/exchange_state.cpp +++ /dev/null @@ -1,87 +0,0 @@ -#include - -namespace eosio { - extended_asset exchange_state::convert_to_exchange( connector& c, extended_asset in ) { - - real_type R(supply.amount); - real_type C(c.balance.amount+in.amount); - real_type F(c.weight/1000.0); - real_type T(in.amount); - real_type ONE(1.0); - - real_type E = -R * (ONE - std::pow( ONE + T / C, F) ); - int64_t issued = int64_t(E); - - supply.amount += issued; - c.balance.amount += in.amount; - - return extended_asset( issued, supply.get_extended_symbol() ); - } - - extended_asset exchange_state::convert_from_exchange( connector& c, extended_asset in ) { - eosio_assert( in.contract == supply.contract, "unexpected asset contract input" ); - eosio_assert( in.symbol== supply.symbol, "unexpected asset symbol input" ); - - real_type R(supply.amount - in.amount); - real_type C(c.balance.amount); - real_type F(1000.0/c.weight); - real_type E(in.amount); - real_type ONE(1.0); - - - real_type T = C * (std::pow( ONE + E/R, F) - ONE); - int64_t out = int64_t(T); - - supply.amount -= in.amount; - c.balance.amount -= out; - - return extended_asset( out, c.balance.get_extended_symbol() ); - } - - extended_asset exchange_state::convert( extended_asset from, extended_symbol to ) { - auto sell_symbol = from.get_extended_symbol(); - auto ex_symbol = supply.get_extended_symbol(); - auto base_symbol = base.balance.get_extended_symbol(); - auto quote_symbol = 
quote.balance.get_extended_symbol(); - - if( sell_symbol != ex_symbol ) { - if( sell_symbol == base_symbol ) { - from = convert_to_exchange( base, from ); - } else if( sell_symbol == quote_symbol ) { - from = convert_to_exchange( quote, from ); - } else { - eosio_assert( false, "invalid sell" ); - } - } else { - if( to == base_symbol ) { - from = convert_from_exchange( base, from ); - } else if( to == quote_symbol ) { - from = convert_from_exchange( quote, from ); - } else { - eosio_assert( false, "invalid conversion" ); - } - } - - if( to != from.get_extended_symbol() ) - return convert( from, to ); - - return from; - } - - bool exchange_state::requires_margin_call( const exchange_state::connector& con )const { - if( con.peer_margin.total_lent.amount > 0 ) { - auto tmp = *this; - auto base_total_col = int64_t(con.peer_margin.total_lent.amount * con.peer_margin.least_collateralized); - auto covered = tmp.convert( extended_asset( base_total_col, con.balance.get_extended_symbol()), con.peer_margin.total_lent.get_extended_symbol() ); - if( covered.amount <= con.peer_margin.total_lent.amount ) - return true; - } - return false; - } - - bool exchange_state::requires_margin_call()const { - return requires_margin_call( base ) || requires_margin_call( quote ); - } - - -} /// namespace eosio diff --git a/contracts/exchange/exchange_state.hpp b/contracts/exchange/exchange_state.hpp deleted file mode 100644 index 1d102578062..00000000000 --- a/contracts/exchange/exchange_state.hpp +++ /dev/null @@ -1,87 +0,0 @@ -#pragma once - -#include - -namespace eosio { - - typedef double real_type; - - struct margin_state { - extended_asset total_lendable; - extended_asset total_lent; - real_type least_collateralized = std::numeric_limits::max(); - - /** - * Total shares allocated to those who have lent, when someone unlends they get - * total_lendable * user_interest_shares / interest_shares and total_lendable is reduced. 
- * - * When interest is paid, it shows up in total_lendable - */ - real_type interest_shares = 0; - - real_type lend( int64_t new_lendable ) { - if( total_lendable.amount > 0 ) { - real_type new_shares = (interest_shares * new_lendable) / total_lendable.amount; - interest_shares += new_shares; - total_lendable.amount += new_lendable; - } else { - interest_shares += new_lendable; - total_lendable.amount += new_lendable; - } - return new_lendable; - } - - extended_asset unlend( double ishares ) { - extended_asset result = total_lent; - print( "unlend: ", ishares, " existing interest_shares: ", interest_shares, "\n" ); - result.amount = int64_t( (ishares * total_lendable.amount) / interest_shares ); - - total_lendable.amount -= result.amount; - interest_shares -= ishares; - - eosio_assert( interest_shares >= 0, "underflow" ); - eosio_assert( total_lendable.amount >= 0, "underflow" ); - - return result; - } - - EOSLIB_SERIALIZE( margin_state, (total_lendable)(total_lent)(least_collateralized)(interest_shares) ) - }; - - /** - * Uses Bancor math to create a 50/50 relay between two asset types. The state of the - * bancor exchange is entirely contained within this struct. There are no external - * side effects associated with using this API. 
- */ - struct exchange_state { - account_name manager; - extended_asset supply; - uint32_t fee = 0; - - struct connector { - extended_asset balance; - uint32_t weight = 500; - - margin_state peer_margin; /// peer_connector collateral lending balance - - EOSLIB_SERIALIZE( connector, (balance)(weight)(peer_margin) ) - }; - - connector base; - connector quote; - - uint64_t primary_key()const { return supply.symbol.name(); } - - extended_asset convert_to_exchange( connector& c, extended_asset in ); - extended_asset convert_from_exchange( connector& c, extended_asset in ); - extended_asset convert( extended_asset from, extended_symbol to ); - - bool requires_margin_call( const exchange_state::connector& con )const; - bool requires_margin_call()const; - - EOSLIB_SERIALIZE( exchange_state, (manager)(supply)(fee)(base)(quote) ) - }; - - typedef eosio::multi_index markets; - -} /// namespace eosio diff --git a/contracts/exchange/market_state.cpp b/contracts/exchange/market_state.cpp deleted file mode 100644 index 78fa7ba9f87..00000000000 --- a/contracts/exchange/market_state.cpp +++ /dev/null @@ -1,223 +0,0 @@ -#include -#include - -namespace eosio { - - market_state::market_state( account_name this_contract, symbol_type market_symbol, exchange_accounts& acnts ) - :marketid( market_symbol.name() ), - market_table( this_contract, marketid ), - base_margins( this_contract, (marketid<<4) + 1), - quote_margins( this_contract, (marketid<<4) + 2), - base_loans( this_contract, (marketid<<4) + 1), - quote_loans( this_contract, (marketid<<4) + 2), - _accounts(acnts), - market_state_itr( market_table.find(marketid) ) - { - eosio_assert( market_state_itr != market_table.end(), "unknown market" ); - exstate = *market_state_itr; - } - - void market_state::margin_call( extended_symbol debt_type ) { - if( debt_type == exstate.base.balance.get_extended_symbol() ) - margin_call( exstate.base, base_margins ); - else - margin_call( exstate.quote, quote_margins ); - } - - void 
market_state::margin_call( exchange_state::connector& c, margins& marginstable ) { - auto price_idx = marginstable.get_index(); - auto pos = price_idx.begin(); - if( pos == price_idx.end() ) - return; - - auto receipt = exstate.convert( pos->collateral, pos->borrowed.get_extended_symbol() ); - eosio_assert( receipt.amount >= pos->borrowed.amount, "programmer error: insufficient collateral to cover" );/// VERY BAD, SHOULD NOT HAPPEN - auto change_debt = receipt - pos->borrowed; - - auto change_collat = exstate.convert( change_debt, pos->collateral.get_extended_symbol() ); - - _accounts.adjust_balance( pos->owner, change_collat ); - - c.peer_margin.total_lent.amount -= pos->borrowed.amount; - price_idx.erase(pos); - - pos = price_idx.begin(); - if( pos != price_idx.end() ) - c.peer_margin.least_collateralized = pos->call_price; - else - c.peer_margin.least_collateralized = double(uint64_t(-1)); - } - - - const exchange_state& market_state::initial_state()const { - return *market_state_itr; - } - - void market_state::lend( account_name lender, const extended_asset& quantity ) { - auto sym = quantity.get_extended_symbol(); - _accounts.adjust_balance( lender, -quantity ); - - if( sym == exstate.base.balance.get_extended_symbol() ) { - double new_shares = exstate.base.peer_margin.lend( quantity.amount ); - adjust_lend_shares( lender, base_loans, new_shares ); - } - else if( sym == exstate.quote.balance.get_extended_symbol() ) { - double new_shares = exstate.quote.peer_margin.lend( quantity.amount ); - adjust_lend_shares( lender, quote_loans, new_shares ); - } - else eosio_assert( false, "unable to lend to this market" ); - } - - void market_state::unlend( account_name lender, double ishares, const extended_symbol& sym ) { - eosio_assert( ishares > 0, "cannot unlend negative balance" ); - adjust_lend_shares( lender, base_loans, -ishares ); - - print( "sym: ", sym ); - - if( sym == exstate.base.balance.get_extended_symbol() ) { - extended_asset unlent = 
exstate.base.peer_margin.unlend( ishares ); - _accounts.adjust_balance( lender, unlent ); - } - else if( sym == exstate.quote.balance.get_extended_symbol() ) { - extended_asset unlent = exstate.quote.peer_margin.unlend( ishares ); - _accounts.adjust_balance( lender, unlent ); - } - else eosio_assert( false, "unable to lend to this market" ); - } - - - - void market_state::adjust_lend_shares( account_name lender, loans& l, double delta ) { - auto existing = l.find( lender ); - if( existing == l.end() ) { - l.emplace( lender, [&]( auto& obj ) { - obj.owner = lender; - obj.interest_shares = delta; - eosio_assert( delta >= 0, "underflow" ); - }); - } else { - l.modify( existing, 0, [&]( auto& obj ) { - obj.interest_shares += delta; - eosio_assert( obj.interest_shares >= 0, "underflow" ); - }); - } - } - - void market_state::cover_margin( account_name borrower, const extended_asset& cover_amount ) { - if( cover_amount.get_extended_symbol() == exstate.base.balance.get_extended_symbol() ) { - cover_margin( borrower, base_margins, exstate.base, cover_amount ); - } else if( cover_amount.get_extended_symbol() == exstate.quote.balance.get_extended_symbol() ) { - cover_margin( borrower, quote_margins, exstate.quote, cover_amount ); - } else { - eosio_assert( false, "invalid debt asset" ); - } - } - - - /** - * This method will use the collateral to buy the borrowed asset from the market - * with collateral to cancel the debt. 
- */ - void market_state::cover_margin( account_name borrower, margins& m, exchange_state::connector& c, - const extended_asset& cover_amount ) - { - auto existing = m.find( borrower ); - eosio_assert( existing != m.end(), "no known margin position" ); - eosio_assert( existing->borrowed.amount >= cover_amount.amount, "attempt to cover more than user has" ); - - auto tmp = exstate; - auto estcol = tmp.convert( cover_amount, existing->collateral.get_extended_symbol() ); - auto debpaid = exstate.convert( estcol, cover_amount.get_extended_symbol() ); - eosio_assert( debpaid.amount >= cover_amount.amount, "unable to cover debt" ); - - auto refundcover = debpaid - cover_amount; - - auto refundcol = exstate.convert( refundcover, existing->collateral.get_extended_symbol() ); - estcol.amount -= refundcol.amount; - - if( existing->borrowed.amount == cover_amount.amount ) { - auto freedcollateral = existing->collateral - estcol; - m.erase( existing ); - existing = m.begin(); - _accounts.adjust_balance( borrower, freedcollateral ); - } - else { - m.modify( existing, 0, [&]( auto& obj ) { - obj.collateral.amount -= estcol.amount; - obj.borrowed.amount -= cover_amount.amount; - obj.call_price = double(obj.borrowed.amount) / obj.collateral.amount; - }); - } - c.peer_margin.total_lent.amount -= cover_amount.amount; - - if( existing != m.end() ) { - if( existing->call_price < c.peer_margin.least_collateralized ) - c.peer_margin.least_collateralized = existing->call_price; - } else { - c.peer_margin.least_collateralized = std::numeric_limits::max(); - } - } - - void market_state::update_margin( account_name borrower, const extended_asset& delta_debt, const extended_asset& delta_col ) - { - if( delta_debt.get_extended_symbol() == exstate.base.balance.get_extended_symbol() ) { - adjust_margin( borrower, base_margins, exstate.base, delta_debt, delta_col ); - } else if( delta_debt.get_extended_symbol() == exstate.quote.balance.get_extended_symbol() ) { - adjust_margin( borrower, 
quote_margins, exstate.quote, delta_debt, delta_col ); - } else { - eosio_assert( false, "invalid debt asset" ); - } - } - - void market_state::adjust_margin( account_name borrower, margins& m, exchange_state::connector& c, - const extended_asset& delta_debt, const extended_asset& delta_col ) - { - auto existing = m.find( borrower ); - if( existing == m.end() ) { - eosio_assert( delta_debt.amount > 0, "cannot borrow neg" ); - eosio_assert( delta_col.amount > 0, "cannot have neg collat" ); - - existing = m.emplace( borrower, [&]( auto& obj ) { - obj.owner = borrower; - obj.borrowed = delta_debt; - obj.collateral = delta_col; - obj.call_price = double(obj.borrowed.amount) / obj.collateral.amount; - }); - } else { - if( existing->borrowed.amount == -delta_debt.amount ) { - eosio_assert( existing->collateral.amount == -delta_col.amount, "user failed to claim all collateral" ); - - m.erase( existing ); - existing = m.begin(); - } else { - m.modify( existing, 0, [&]( auto& obj ) { - obj.borrowed += delta_debt; - obj.collateral += delta_col; - obj.call_price = double(obj.borrowed.amount) / obj.collateral.amount; - }); - } - } - - c.peer_margin.total_lent += delta_debt; - eosio_assert( c.peer_margin.total_lent.amount <= c.peer_margin.total_lendable.amount, "insufficient funds availalbe to borrow" ); - - if( existing != m.end() ) { - if( existing->call_price < c.peer_margin.least_collateralized ) - c.peer_margin.least_collateralized = existing->call_price; - - eosio_assert( !exstate.requires_margin_call( c ), "this update would trigger a margin call" ); - } else { - c.peer_margin.least_collateralized = std::numeric_limits::max(); - } - - } - - - - void market_state::save() { - market_table.modify( market_state_itr, 0, [&]( auto& s ) { - s = exstate; - }); - } - -} diff --git a/contracts/exchange/market_state.hpp b/contracts/exchange/market_state.hpp deleted file mode 100644 index e145ef61cc1..00000000000 --- a/contracts/exchange/market_state.hpp +++ /dev/null @@ -1,77 +0,0 
@@ -#pragma once -#include -#include - -namespace eosio { - - /** - * We calculate a unique scope for each market/borrowed_symbol/collateral_symbol and then - * instantiate a table of margin positions... with in this table each user has exactly - * one position and therefore the owner can serve as the primary key. - */ - struct margin_position { - account_name owner; - extended_asset borrowed; - extended_asset collateral; - double call_price = 0; - - uint64_t get_call()const { return uint64_t(1000000*call_price); } - uint64_t primary_key()const { return owner; } - - EOSLIB_SERIALIZE( margin_position, (owner)(borrowed)(collateral)(call_price) ) - }; - - typedef eosio::multi_index > - > margins; - - - struct loan_position { - account_name owner; /// the owner - double interest_shares; /// the number of shares in the total lent pool - - uint64_t primary_key()const { return owner; } - - EOSLIB_SERIALIZE( loan_position, (owner)(interest_shares) ) - }; - - typedef eosio::multi_index loans; - - /** - * Maintains a state along with the cache of margin positions and/or limit orders. 
- */ - struct market_state { - market_state( account_name this_contract, symbol_type market_symbol, exchange_accounts& acnts ); - - const exchange_state& initial_state()const; - void margin_call( extended_symbol debt_type ); - void lend( account_name lender, const extended_asset& debt ); - void unlend( account_name lender, double ishares, const extended_symbol& sym ); - void update_margin( account_name borrower, const extended_asset& delta_debt, - const extended_asset& delta_collateral ); - void cover_margin( account_name borrower, const extended_asset& cover_amount ); - - void save(); - - symbol_name marketid; - exchange_state exstate; - - markets market_table; - margins base_margins; - margins quote_margins; - loans base_loans; - loans quote_loans; - - private: - exchange_accounts& _accounts; - markets::const_iterator market_state_itr; - - void cover_margin( account_name borrower, margins& m, exchange_state::connector& c, - const extended_asset& cover_amount ); - void adjust_margin( account_name borrower, margins& m, exchange_state::connector& c, - const extended_asset& delta_debt, const extended_asset& delta_col ); - void adjust_lend_shares( account_name lender, loans& l, double delta ); - void margin_call( exchange_state::connector& c, margins& m ); - }; - -} /// namespace eosio diff --git a/contracts/exchange/test_exchange.cpp b/contracts/exchange/test_exchange.cpp deleted file mode 100644 index a6a1f9dae96..00000000000 --- a/contracts/exchange/test_exchange.cpp +++ /dev/null @@ -1,518 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -/* -#include -#include -#include -#include - -#include -#include "fixed.hpp" -*/ - -#include -#include - -//#include "bfp/lib/posit.h" - -using namespace std; - -typedef long double real_type; -typedef double token_type; - - -/* -struct margin_position { - account_name owner; - uint64_t exchange_id; - asset lent; - asset collateral; - uint64_t open_time; - - uint64_t primary_key()const{ return owner; 
} - uint256_t by_owner_ex_lent_collateral()const { - - } - - real_type by_call_price()const { - return collateral.amount / real_type(lent.amount); - } -}; -*/ - - - -template -Real Abs(Real Nbr) -{ - if( Nbr >= 0 ) - return Nbr; - else - return -Nbr; -} - -template -Real sqrt_safe( const Real Nbr) -{ - return sqrt(Nbr); -// cout << " " << Nbr << "\n";; - Real Number = Nbr / Real(2.0); - const Real Tolerance = Real(double(1.0e-12)); - //cout << "tol: " << Tolerance << "\n"; - - Real Sq; - Real Er; - do { - auto tmp = Nbr / Number; - tmp += Number; - tmp /= real_type(2.0); - if( Number == tmp ) break; - Number = tmp; - Sq = Number * Number; - Er = Abs(Sq - Nbr); -// wdump((Er.getDouble())(1.0e-8)(Tolerance.getDouble())); -// wdump(((Er - Tolerance).getDouble())); - }while( Er >= Tolerance ); - - return Number; -} - -typedef __uint128_t uint128_t; -typedef string account_name; -typedef string symbol_type; - -static const symbol_type exchange_symbol = "EXC"; - -struct asset { - token_type amount; - symbol_type symbol; -}; - -struct margin_key { - symbol_type lent; - symbol_type collat; -}; - -struct margin { - asset lent; - symbol_type collateral_symbol; - real_type least_collateralized_rate; -}; - -struct user_margin { - asset lent; - asset collateral; - - real_type call_price()const { - return collateral.amount / real_type(lent.amount); - } -}; - -struct exchange_state; -struct connector { - asset balance; - real_type weight = 0.5; - token_type total_lent; /// lent from maker to users - token_type total_borrowed; /// borrowed from users to maker - token_type total_available_to_lend; /// amount available to borrow - token_type interest_pool; /// total interest earned but not claimed, - /// each user can claim user_lent - - void borrow( exchange_state& ex, const asset& amount_to_borrow ); - asset convert_to_exchange( exchange_state& ex, const asset& input ); - asset convert_from_exchange( exchange_state& ex, const asset& input ); -}; - - -struct balance_key { - 
account_name owner; - symbol_type symbol; - - friend bool operator < ( const balance_key& a, const balance_key& b ) { - return std::tie( a.owner, a.symbol ) < std::tie( b.owner, b.symbol ); - } - friend bool operator == ( const balance_key& a, const balance_key& b ) { - return std::tie( a.owner, a.symbol ) == std::tie( b.owner, b.symbol ); - } -}; - -real_type fee = 1;//.9995; - - -int64_t maxtrade = 20000ll; - -struct exchange_state { - token_type supply; - symbol_type symbol = exchange_symbol; - - connector base; - connector quote; - - void transfer( account_name user, asset q ) { - output[balance_key{user,q.symbol}] += q.amount; - } - map output; - vector margins; -}; - -/* -void connector::borrow( exchange_state& ex, account_name user, - asset amount_to_borrow, - asset collateral, - user_margin& marg ) { - FC_ASSERT( amount_to_borrow.amount < balance.amount, "attempt to borrow too much" ); - lent.amount += amount_to_borrow.amount; - balance.amount -= amount_to_borrow.amount; - ex.transfer( user, amount_to_borrow ); - - marg.collateral.amount += collateral.amount; - marg.lent.amount += amount_to_borrow.amount; - auto p = marg.price(); - - if( collateral.symbol == ex.symbol ) { - if( p > ex_margin.least_collateralized_rate ) - ex_margin.least_collateralized_rate = p; - } - else if( collateral.symbol == peer_margin.collateral.symbol ) { - if( p > peer_margin.least_collateralized_rate ) - peer_margin.least_collateralized_rate = p; - } -} -*/ - -asset connector::convert_to_exchange( exchange_state& ex, const asset& input ) { - - real_type R(ex.supply); - real_type S(balance.amount+input.amount); - real_type F(weight); - real_type T(input.amount); - real_type ONE(1.0); - - auto E = R * (ONE - std::pow( ONE + T / S, F) ); - - - //auto real_issued = real_type(ex.supply) * (sqrt_safe( 1.0 + (real_type(input.amount) / (balance.amount+input.amount))) - 1.0); - //auto real_issued = real_type(ex.supply) * (std::pow( 1.0 + (real_type(input.amount) / 
(balance.amount+input.amount)), weight) - real_type(1.0)); - //auto real_issued = R * (std::pow( ONE + (T / S), F) - ONE); - - //wdump((double(E))(double(real_issued))); - token_type issued = -E; //real_issued; - - - ex.supply += issued; - balance.amount += input.amount; - - return asset{ issued, exchange_symbol }; -} - -asset connector::convert_from_exchange( exchange_state& ex, const asset& input ) { - - real_type R(ex.supply - input.amount); - real_type S(balance.amount); - real_type F(weight); - real_type E(input.amount); - real_type ONE(1.0); - - real_type T = S * (std::pow( ONE + E/R, ONE/F) - ONE); - - - /* - real_type base = real_type(1.0) + ( real_type(input.amount) / real_type(ex.supply-input.amount)); - auto out = (balance.amount * ( std::pow(base,1.0/weight) - real_type(1.0) )); - */ - auto out = T; - -// edump((double(out-T))(double(out))(double(T))); - - ex.supply -= input.amount; - balance.amount -= token_type(out); - return asset{ token_type(out), balance.symbol }; -} - - -void eosio_assert( bool test, const string& msg ) { - if( !test ) throw std::runtime_error( msg ); -} - -void print_state( const exchange_state& e ); - - - -/** - * Given the current state, calculate the new state - */ -exchange_state convert( const exchange_state& current, - account_name user, - asset input, - asset min_output, - asset* out = nullptr) { - - eosio_assert( min_output.symbol != input.symbol, "cannot convert" ); - - exchange_state result(current); - - asset initial_output = input; - - if( input.symbol != exchange_symbol ) { - if( input.symbol == result.base.balance.symbol ) { - initial_output = result.base.convert_to_exchange( result, input ); - } - else if( input.symbol == result.quote.balance.symbol ) { - initial_output = result.quote.convert_to_exchange( result, input ); - } - else eosio_assert( false, "invalid symbol" ); - } else { - if( min_output.symbol == result.base.balance.symbol ) { - initial_output = result.base.convert_from_exchange( result, 
initial_output ); - } - else if( min_output.symbol == result.quote.balance.symbol ) { - initial_output= result.quote.convert_from_exchange( result, initial_output ); - } - else eosio_assert( false, "invalid symbol" ); - } - - - - asset final_output = initial_output; - -// std::cerr << "\n\nconvert " << input.amount << " "<< input.symbol << " => " << final_output.amount << " " << final_output.symbol << " final: " << min_output.symbol << " \n"; - - result.output[ balance_key{user,final_output.symbol} ] += final_output.amount; - result.output[ balance_key{user,input.symbol} ] -= input.amount; - - if( min_output.symbol != final_output.symbol ) { - return convert( result, user, final_output, min_output, out ); - } - - if( out ) *out = final_output; - return result; -} - -/* VALIDATE MARGIN ALGORITHM - * - * Given an initial condition, verify that all margin positions can be filled. - * - * Assume 3 assets, B, Q, and X and the notation LENT-COLLAT we get the following - * pairs: - * - * B-X - * B-A - * A-X - * A-B - * X-A - * X-B - * - * We assume that pairs of the same lent-type have to be simultainously filled, - * as filling one could make it impossible to fill the other. - * - * -void validate_margin( exchange_state& e ) { - for( const auto& pos : e.margins ) { - token_type min_collat = pos.lent.amount * pos.least_collateralized_rate; - asset received; - e = convert( e, "user", asset{ min_collat, pos.first.collat }, pos.lent, &received ); - FC_ASSERT( received > pos.lent.amount, "insufficient collateral" ); - - received.amount -= pos.lent.amount; - e = convert( e, "user", received, asset{ token_type(0), pos.collateral_symbol} ); - } -} -*/ - - - - - -/** - * A user has Collateral C and wishes to borrow B, so we give user B - * provided that C is enough to buy B back after removing it from market and - * that no margin calls would be triggered. 
- */ -exchange_state borrow( const exchange_state& current, account_name user, - asset amount_to_borrow, - asset collateral_provided ) { - FC_ASSERT( amount_to_borrow.symbol != collateral_provided.symbol ); - - /// lookup the margin position for user - /// update user's margin position - /// update least collateralized margin position on state - /// remove amount_to_borrow from exchange - /// lock collateral for user - /// simulate complete margin calls - return exchange_state(); -} - -exchange_state cover( const exchange_state& current, account_name user, - asset amount_to_cover, asset collateral_to_cover_with ) -{ - /// lookup existing position for user/debt/collat - /// verify collat > collateral_to_cover_with - /// sell collateral_to_cover_with for debt on market - /// reduce debt by proceeds - /// add proceeds to connector - // - if borrowed from user, reduce borrowed from user - /// calculate new call price and update least collateralized position - /// simulate complete margin calls - return exchange_state(); -} - -exchange_state lend( const exchange_state& current, account_name lender, - asset asset_to_lend ) { - /// add to pool of funds available for lending and buy SHARES in - /// interest pool at current rate. - return exchange_state(); -} - -exchange_state unlend( const exchange_state& current, account_name lender, - asset asset_to_lend ) { - /// sell shares in interest pool at current rate - /// this is permitable so long as total borrowed from users remains less than - /// total available to lend. Otherwise, margin is called on the least - /// collateralized position. 
- return exchange_state(); -} - - - -void print_state( const exchange_state& e ) { - std::cerr << "\n-----------------------------\n"; - std::cerr << "supply: " << e.supply << "\n"; - std::cerr << "base: " << e.base.balance.amount << " " << e.base.balance.symbol << "\n"; - std::cerr << "quote: " << e.quote.balance.amount << " " << e.quote.balance.symbol << "\n"; - - for( const auto& item : e.output ) { - cerr << item.first.owner << " " << item.second << " " << item.first.symbol << "\n"; - } - std::cerr << "\n-----------------------------\n"; -} - - -int main( int argc, char** argv ) { - // std::cerr << "root: " << double(root.numerator())/root.denominator() << "\n"; - - - exchange_state state; - state.supply = 100000000000ll; - //state.base.weight = state.total_weight / 2.; - state.base.balance.amount = 100000000; - state.base.balance.symbol = "USD"; - state.base.weight = .49; - //state.quote.weight = state.total_weight / 2.; - state.quote.balance.amount = state.base.balance.amount; - state.quote.balance.symbol = "BTC"; - state.quote.weight = .51; - - print_state( state ); - - //state = convert( state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - - auto start = fc::time_point::now(); - for( uint32_t i = 0; i < 10000; ++i ) { - if( rand() % 2 == 0 ) - state = convert( state, "dan", asset{ token_type(uint32_t(rand())%maxtrade), "USD"}, asset{ 0, "BTC" } ); - else - state = convert( state, "dan", asset{ token_type(uint32_t(rand())%maxtrade), "BTC"}, asset{ 0, "USD" } ); - } - for( const auto& item : state.output ) { - if( item.second > 0 ) { - if( item.first.symbol == "USD" ) - state = convert( state, "dan", asset{ item.second, item.first.symbol}, asset{ 0, "BTC" } ); - else - state = convert( state, "dan", asset{ item.second, item.first.symbol}, asset{ 0, "USD" } ); - break; - } - } - print_state( state ); - - auto end = fc::time_point::now(); - wdump((end-start)); - /* - auto new_state = convert( state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - 
new_state = convert( new_state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 100, "USD"}, asset{ 0, "BTC" } ); - - new_state = convert( new_state, "dan", asset{ 100, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 100, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 100, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 100, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 100, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 100, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 100, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 92.5-0.08-.53, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 100, "BTC"}, asset{ 0, "USD" } ); - */ - - //new_state = convert( new_state, "dan", asset{ 442+487-733+280+349+4.493+62.9, "BTC"}, asset{ 0, "USD" } ); - /* - auto new_state = convert( state, "dan", asset{ 500, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 500, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 442+487, "BTC"}, asset{ 0, "USD" } ); - */ - /* - new_state = convert( new_state, "dan", asset{ 487, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 442, "BTC"}, asset{ 0, "USD" } ); - */ - //new_state = convert( new_state, "dan", 
asset{ 526, "BTC"}, asset{ 0, "USD" } ); - //new_state = convert( new_state, "dan", asset{ 558, "BTC"}, asset{ 0, "USD" } ); - //new_state = convert( new_state, "dan", asset{ 1746, "BTC"}, asset{ 0, "USD" } ); - /* - new_state = convert( new_state, "dan", asset{ 526, "BTC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 500, "USD"}, asset{ 0, "EXC" } ); - new_state = convert( new_state, "dan", asset{ 500, "BTC"}, asset{ 0, "EXC" } ); - new_state = convert( new_state, "dan", asset{ 10, "EXC"}, asset{ 0, "USD" } ); - new_state = convert( new_state, "dan", asset{ 10, "EXC"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 500, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 500, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 500, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 500, "USD"}, asset{ 0, "BTC" } ); - new_state = convert( new_state, "dan", asset{ 2613, "BTC"}, asset{ 0, "USD" } ); - */ - - - - /* - auto new_state = convert( state, "dan", asset{ 10, "EXC"}, asset{ 0, "USD" } ); - - print_state( new_state ); - - new_state = convert( state, "dan", asset{ 10, "EXC"}, asset{ 0, "BTC" } ); - print_state( new_state ); - new_state = convert( new_state, "dan", asset{ 10, "EXC"}, asset{ 0, "USD" } ); - print_state( new_state ); - - - //new_state = convert( new_state, "dan", asset{ 52, "USD"}, asset{ 0, "EXC" } ); - */ - - return 0; -} - - - -#if 0 - -0. if( margin_fault ) - Convert Least Collateral - if( margin fault )) - defer - -if( margin_fault ) assert( false, "busy calling" ); - -1. Fill Incoming Order -2. Check Counter Order -3. 
if( margin fault ) - Defer Trx to finish margin call - - -#endif diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 86c6e97cb36..338a8910fff 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -19,7 +19,7 @@ add_executable( plugin_test ${UNIT_TESTS} ${WASM_UNIT_TESTS} main.cpp) target_link_libraries( plugin_test eosio_testing eosio_chain chainbase eos_utilities chain_plugin wallet_plugin abi_generator fc ${PLATFORM_SPECIFIC_LIBS} ) target_include_directories( plugin_test PUBLIC ${CMAKE_SOURCE_DIR}/plugins/net_plugin/include ) -add_dependencies(plugin_test asserter test_api test_api_mem test_api_db test_api_multi_index exchange proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop dice eosio.msig) +add_dependencies(plugin_test asserter test_api test_api_mem test_api_db test_api_multi_index proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop dice eosio.msig) # configure_file(${CMAKE_CURRENT_SOURCE_DIR}/core_symbol.py.in ${CMAKE_CURRENT_BINARY_DIR}/core_symbol.py) diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py index 85be1bd940b..25c9b188cfc 100755 --- a/tests/nodeos_run_test.py +++ b/tests/nodeos_run_test.py @@ -601,18 +601,6 @@ errorExit("Failed to lock wallet %s" % (defproduceraWallet.name)) - Print("Exchange Contract Tests") - Print("upload exchange contract") - - contractDir="contracts/exchange" - wasmFile="exchange.wasm" - abiFile="exchange.abi" - Print("Publish exchange contract") - trans=node.publishContract(exchangeAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) - if trans is None: - cmdError("%s set contract exchange" % (ClientName)) - errorExit("Failed to publish contract.") - contractDir="contracts/simpledb" wasmFile="simpledb.wasm" abiFile="simpledb.abi" @@ -622,7 +610,7 @@ if retMap is None: errorExit("Failed to publish, but should have returned a details map") if 
retMap["returncode"] == 0 or retMap["returncode"] == 139: # 139 SIGSEGV - errorExit("FAILURE - set contract exchange failed", raw=True) + errorExit("FAILURE - set contract simpledb failed", raw=True) else: Print("Test successful, %s returned error code: %d" % (ClientName, retMap["returncode"])) diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt index 7442b00a69e..41f484269cf 100644 --- a/unittests/CMakeLists.txt +++ b/unittests/CMakeLists.txt @@ -28,7 +28,7 @@ target_include_directories( unit_test PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/contracts ${CMAKE_CURRENT_BINARY_DIR}/contracts ${CMAKE_CURRENT_BINARY_DIR}/include ) -add_dependencies(unit_test asserter test_api test_api_mem test_api_db test_ram_limit test_api_multi_index exchange eosio.token proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop dice eosio.msig payloadless tic_tac_toe deferred_test) +add_dependencies(unit_test asserter test_api test_api_mem test_api_db test_ram_limit test_api_multi_index eosio.token proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop dice eosio.msig payloadless tic_tac_toe deferred_test) #Manually run unit_test for all supported runtimes #To run unit_test with all log from blockchain displayed, put --verbose after --, i.e. 
unit_test -- --verbose diff --git a/unittests/exchange_tests.cpp b/unittests/exchange_tests.cpp deleted file mode 100644 index 28e8ac563ba..00000000000 --- a/unittests/exchange_tests.cpp +++ /dev/null @@ -1,358 +0,0 @@ -#include -#include -#include -#include -#include - -#include -#include - -#include -#include - -#include - -#include -#include - -#ifdef NON_VALIDATING_TEST -#define TESTER tester -#else -#define TESTER validating_tester -#endif - -using namespace eosio; -using namespace eosio::chain; -using namespace eosio::testing; -using namespace fc; - -#define A(X) asset::from_string( #X ) - -struct margin_state { - extended_asset total_lendable; - extended_asset total_lent; - double least_collateralized = 0; - double interest_shares = 0; -}; -FC_REFLECT( margin_state, (total_lendable)(total_lent)(least_collateralized)(interest_shares) ) - -struct exchange_state { - account_name manager; - extended_asset supply; - uint32_t fee = 0; - - struct connector { - extended_asset balance; - uint32_t weight = 500; - margin_state peer_margin; - }; - - connector base; - connector quote; -}; - -FC_REFLECT( exchange_state::connector, (balance)(weight)(peer_margin) ); -FC_REFLECT( exchange_state, (manager)(supply)(fee)(base)(quote) ); - -class exchange_tester : public TESTER { - public: - auto push_action(account_name contract, - const account_name& signer, - const action_name &name, const variant_object &data ) { - string action_type_name = abi_ser.get_action_type(name); - - action act; - act.account = contract; - act.name = name; - act.authorization = vector{{signer, config::active_name}}; - act.data = abi_ser.variant_to_binary(action_type_name, data, abi_serializer_max_time); - - signed_transaction trx; - trx.actions.emplace_back(std::move(act)); - set_transaction_headers(trx); - trx.sign(get_private_key(signer, "active"), control->get_chain_id()); - return push_transaction(trx); - } - - asset get_balance(const account_name& account) const { - return 
get_currency_balance(N(exchange), symbol(SY(4,CUR)), account); - } - - exchange_state get_market_state( account_name exchange, symbol sym ) { - - uint64_t s = sym.value() >> 8; - const auto& db = control->db(); - const auto* tbl = db.find(boost::make_tuple(exchange, s, N(markets))); - if (tbl) { - const auto *obj = db.find(boost::make_tuple(tbl->id, s)); - if( obj ) { - fc::datastream ds(obj->value.data(), obj->value.size()); - exchange_state result; - fc::raw::unpack( ds, result ); - return result; - } - } - FC_ASSERT( false, "unknown market state" ); - } - - extended_asset get_exchange_balance( account_name exchange, account_name currency, - symbol sym, account_name owner ) { - const auto& db = control->db(); - const auto* tbl = db.find(boost::make_tuple(exchange, owner, N(exaccounts))); - - if (tbl) { - const auto *obj = db.find(boost::make_tuple(tbl->id, owner)); - if( obj ) { - fc::datastream ds(obj->value.data(), obj->value.size()); - account_name own; - flat_map, int64_t> balances; - - fc::raw::unpack( ds, own ); - fc::raw::unpack( ds, balances); - - // wdump((balances)); - auto b = balances[ make_pair( sym, currency ) ]; - return extended_asset( asset( b, sym ), currency ); - } - } - return extended_asset(); - } - - double get_lent_shares( account_name exchange, symbol market, account_name owner, bool base ) - { - const auto& db = control->db(); - - auto scope = ((market.value() >> 8) << 4) + (base ? 
1 : 2); - - const auto* tbl = db.find(boost::make_tuple(exchange, scope, N(loans))); - - if (tbl) { - const auto *obj = db.find(boost::make_tuple(tbl->id, owner)); - if( obj ) { - fc::datastream ds(obj->value.data(), obj->value.size()); - account_name own; - double interest_shares; - - fc::raw::unpack( ds, own ); - fc::raw::unpack( ds, interest_shares); - - return interest_shares; - } - } - FC_ASSERT( false, "unable to find loan balance" ); - } - - void deploy_exchange( account_name ac ) { - create_account( ac ); - set_code( ac, exchange_wast ); - } - - void create_currency( name contract, name signer, asset maxsupply ) { - push_action(contract, signer, N(create), mutable_variant_object() - ("issuer", contract ) - ("maximum_supply", maxsupply ) - ("can_freeze", 0) - ("can_recall", 0) - ("can_whitelist", 0) - ); - } - - void issue( name contract, name signer, name to, asset amount ) { - push_action( contract, signer, N(issue), mutable_variant_object() - ("to", to ) - ("quantity", amount ) - ("memo", "") - ); - } - - auto trade( name ex_contract, name signer, symbol market, - extended_asset sell, extended_asset min_receive ) - { - wdump((market)(sell)(min_receive)); - wdump((market.to_string())); - wdump((fc::variant(market).as_string())); - wdump((fc::variant(market).as())); - return push_action( ex_contract, signer, N(trade), mutable_variant_object() - ("seller", signer ) - ("market", market ) - ("sell", sell) - ("min_receive", min_receive) - ("expire", 0) - ("fill_or_kill", 1) - ); - } - - auto deposit( name exchangecontract, name signer, extended_asset amount ) { - return push_action( amount.contract, signer, N(transfer), mutable_variant_object() - ("from", signer ) - ("to", exchangecontract ) - ("quantity", amount.quantity ) - ("memo", "deposit") - ); - } - - auto lend( name contract, name signer, extended_asset quantity, symbol market ) { - return push_action( contract, signer, N(lend), mutable_variant_object() - ("lender", signer ) - ("market", market ) - 
("quantity", quantity ) - ); - } - auto unlend( name contract, name signer, double interest_shares, extended_symbol interest_symbol, symbol market ) { - return push_action( contract, signer, N(unlend), mutable_variant_object() - ("lender", signer ) - ("market", market ) - ("interest_shares", interest_shares) - ("interest_symbol", interest_symbol) - ); - } - - auto create_exchange( name contract, name signer, - extended_asset base_deposit, - extended_asset quote_deposit, - asset exchange_supply ) { - return push_action( contract, signer, N(createx), mutable_variant_object() - ("creator", signer) - ("initial_supply", exchange_supply) - ("fee", 0) - ("base_deposit", base_deposit) - ("quote_deposit", quote_deposit) - ); - } - - - exchange_tester() - :TESTER(),abi_ser(json::from_string(exchange_abi).as(), abi_serializer_max_time) - { - create_account( N(dan) ); - create_account( N(trader) ); - - deploy_exchange( N(exchange) ); - - create_currency( N(exchange), N(exchange), A(1000000.00 USD) ); - create_currency( N(exchange), N(exchange), A(1000000.00 BTC) ); - - issue( N(exchange), N(exchange), N(dan), A(1000.00 USD) ); - issue( N(exchange), N(exchange), N(dan), A(1000.00 BTC) ); - - deposit( N(exchange), N(dan), extended_asset( A(500.00 USD), N(exchange) ) ); - deposit( N(exchange), N(dan), extended_asset( A(500.00 BTC), N(exchange) ) ); - - create_exchange( N(exchange), N(dan), - extended_asset( A(400.00 USD), N(exchange) ), - extended_asset( A(400.00 BTC), N(exchange) ), - A(10000000.00 EXC) ); - - produce_block(); - } - - abi_serializer abi_ser; -}; - -BOOST_AUTO_TEST_SUITE(exchange_tests) - -BOOST_AUTO_TEST_CASE( bootstrap ) try { - auto expected = asset::from_string( "1000000.0000 CUR" ); - exchange_tester t; - t.create_currency( N(exchange), N(exchange), expected ); - t.issue( N(exchange), N(exchange), N(exchange), expected ); - auto actual = t.get_currency_balance(N(exchange), expected.get_symbol(), N(exchange)); - BOOST_REQUIRE_EQUAL(expected, actual); -} 
FC_LOG_AND_RETHROW() /// test_api_bootstrap - - -BOOST_AUTO_TEST_CASE( exchange_create ) try { - auto expected = asset::from_string( "1000000.0000 CUR" ); - exchange_tester t; - - t.issue( N(exchange), N(exchange), N(trader), A(2000.00 BTC) ); - t.issue( N(exchange), N(exchange), N(trader), A(2000.00 USD) ); - - t.deposit( N(exchange), N(trader), extended_asset( A(1500.00 USD), N(exchange) ) ); - t.deposit( N(exchange), N(trader), extended_asset( A(1500.00 BTC), N(exchange) ) ); - - auto trader_ex_usd = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"USD"), N(trader) ); - auto trader_ex_btc = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"BTC"), N(trader) ); - auto dan_ex_usd = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"USD"), N(dan) ); - auto dan_ex_btc = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"BTC"), N(dan) ); - - auto dan_ex_exc = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"EXC"), N(dan) ); - wdump((dan_ex_exc)); - - auto result = t.trade( N(exchange), N(trader), symbol(2,"EXC"), - extended_asset( A(10.00 BTC), N(exchange) ), - extended_asset( A(0.01 USD), N(exchange) ) ); - - for( const auto& at : result->action_traces ) - ilog( "${s}", ("s",at.console) ); - - trader_ex_usd = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"USD"), N(trader) ); - trader_ex_btc = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"BTC"), N(trader) ); - wdump((trader_ex_btc.quantity)); - wdump((trader_ex_usd.quantity)); - - result = t.trade( N(exchange), N(trader), symbol(2,"EXC"), - extended_asset( A(9.75 USD), N(exchange) ), - extended_asset( A(0.01 BTC), N(exchange) ) ); - - trader_ex_usd = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"USD"), N(trader) ); - trader_ex_btc = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"BTC"), N(trader) ); - - for( const auto& at : result->action_traces ) - ilog( "${s}", ("s",at.console) ); - - 
wdump((trader_ex_btc.quantity)); - wdump((trader_ex_usd.quantity)); - - BOOST_REQUIRE_EQUAL( trader_ex_usd.quantity, A(1500.00 USD) ); - BOOST_REQUIRE_EQUAL( trader_ex_btc.quantity, A(1499.99 BTC) ); - - wdump((t.get_market_state( N(exchange), symbol(2,"EXC") ) )); - - //BOOST_REQUIRE_EQUAL(expected, actual); -} FC_LOG_AND_RETHROW() /// test_api_bootstrap - - -BOOST_AUTO_TEST_CASE( exchange_lend ) try { - exchange_tester t; - - t.create_account( N(lender) ); - t.issue( N(exchange), N(exchange), N(lender), A(2000.00 BTC) ); - t.issue( N(exchange), N(exchange), N(lender), A(2000.00 USD) ); - - t.deposit( N(exchange), N(lender), extended_asset( A(1500.00 USD), N(exchange) ) ); - t.deposit( N(exchange), N(lender), extended_asset( A(1500.00 BTC), N(exchange) ) ); - - auto lender_ex_usd = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"USD"), N(lender) ); - auto lender_ex_btc = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"BTC"), N(lender) ); - - t.lend( N(exchange), N(lender), extended_asset( A(1000.00 USD), N(exchange) ), symbol(2,"EXC") ); - - lender_ex_usd = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"USD"), N(lender) ); - lender_ex_btc = t.get_exchange_balance( N(exchange), N(exchange), symbol(2,"BTC"), N(lender) ); - - wdump((lender_ex_btc.quantity)); - wdump((lender_ex_usd.quantity)); - - BOOST_REQUIRE_EQUAL( lender_ex_usd.quantity, A(500.00 USD) ); - - auto lentshares = t.get_lent_shares( N(exchange), symbol(2,"EXC"), N(lender), true ); - wdump((lentshares)); - BOOST_REQUIRE_EQUAL( lentshares, 100000 ); - - wdump((t.get_market_state( N(exchange), symbol(2,"EXC") ) )); - - t.unlend( N(exchange), N(lender), lentshares, extended_symbol{ symbol(2,"USD"), N(exchange)}, symbol(2,"EXC") ); - - lentshares = t.get_lent_shares( N(exchange), symbol(2,"EXC"), N(lender), true ); - wdump((lentshares)); - - wdump((t.get_market_state( N(exchange), symbol(2,"EXC") ) )); - - //BOOST_REQUIRE_EQUAL(expected, actual); -} FC_LOG_AND_RETHROW() 
/// test_api_bootstrap - - - - -BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/multisig_tests.cpp b/unittests/multisig_tests.cpp index 80e5433e71f..4e32a60c6a7 100644 --- a/unittests/multisig_tests.cpp +++ b/unittests/multisig_tests.cpp @@ -6,9 +6,6 @@ #include #include -#include -#include - #include #include @@ -322,7 +319,7 @@ BOOST_FIXTURE_TEST_CASE( propose_with_wrong_requested_auth, eosio_msig_tester ) BOOST_FIXTURE_TEST_CASE( big_transaction, eosio_msig_tester ) try { vector perm = { { N(alice), config::active_name }, { N(bob), config::active_name } }; - auto wasm = wast_to_wasm( exchange_wast ); + auto wasm = wast_to_wasm( eosio_token_wast ); variant pretty_trx = fc::mutable_variant_object() ("expiration", "2020-01-01T00:30") From 2a3a02dec487da7ae08aa7b8bb3718208332a90e Mon Sep 17 00:00:00 2001 From: Paul Calabrese Date: Wed, 29 Aug 2018 14:35:42 -0500 Subject: [PATCH 027/194] Clean up launcher option code --- programs/eosio-launcher/main.cpp | 105 ++++++++++--------------------- 1 file changed, 33 insertions(+), 72 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index d5ae379b4b0..55e96c9f1c2 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -458,6 +458,7 @@ struct launcher_def { void make_custom (); void write_dot_file (); void format_ssh (const string &cmd, const string &host_name, string &ssh_cmd_line); + void do_command(const host_def& host, const string& name, vector> env_pairs, const string& cmd); bool do_ssh (const string &cmd, const string &host_name); void prep_remote_config_dir (eosd_def &node, host_def *host); void launch (eosd_def &node, string >s); @@ -1644,6 +1645,30 @@ launcher_def::get_nodes(const string& node_number_list) { return node_list; } +void +launcher_def::do_command(const host_def& host, const string& name, + vector> env_pairs, const string& cmd) { + if (!host.is_local()) { + string rcmd = "cd " + host.eosio_home + "; "; + for (auto& env_pair : 
env_pairs) { + rcmd += "export " + env_pair.first + "=" + env_pair.second + "; "; + } + rcmd += cmd; + if (!do_ssh(rcmd, host.host_name)) { + cerr << "Remote command failed for " << name << endl; + exit (-1); + } + } + else { + bp::environment e; + for (auto& env_pair : env_pairs) { + e.emplace(env_pair.first, env_pair.second); + } + bp::child c(cmd, e); + c.wait(); + } +} + void launcher_def::bounce (const string& node_numbers) { auto node_list = get_nodes(node_numbers); @@ -1652,30 +1677,8 @@ launcher_def::bounce (const string& node_numbers) { const eosd_def& node = node_pair.second; string node_num = node.name.substr( node.name.length() - 2 ); cout << "Bouncing " << node.name << endl; - if (!host.is_local()) { - string cmd = "cd " + host.eosio_home + "; " - + "export EOSIO_HOME=" + host.eosio_home + string("; ") - + "export EOSIO_NODE=" + node_num + "; " - + "./scripts/eosio-tn_bounce.sh " + eosd_extra_args; - if (!do_ssh(cmd, host.host_name)) { - cerr << "Unable to bounce " << node.name << endl; - exit (-1); - } - } - else { - string cmd = "./scripts/eosio-tn_bounce.sh " + eosd_extra_args; - bp::child c(cmd, - bp::env["EOSIO_HOME"] = host.eosio_home, - bp::env["EOSIO_NODE"] = node_num ); - - if(!c.running()) { - cerr << "child not running after spawn " << cmd << endl; - for (int i = 0; i > 0; i++) { - if (c.running () ) break; - } - } - c.wait(); - } + string cmd = "./scripts/eosio-tn_bounce.sh " + eosd_extra_args; + do_command(host, node.name, { { "EOSIO_HOME", host.eosio_home }, { "EOSIO_NODE", node_num } }, cmd); } } @@ -1687,32 +1690,10 @@ launcher_def::down (const string& node_numbers) { const eosd_def& node = node_pair.second; string node_num = node.name.substr( node.name.length() - 2 ); cout << "Taking down " << node.name << endl; - if (!host.is_local()) { - string cmd = "cd " + host.eosio_home + "; " - + "export EOSIO_HOME=" + host.eosio_home + "; " - + "export EOSIO_NODE=" + node_num + "; " - + "export EOSIO_TN_RESTART_CONFIG_DIR=" + 
node.config_dir_name + "; " - + "./scripts/eosio-tn_down.sh"; - if (!do_ssh(cmd, host.host_name)) { - cerr << "Unable to down " << node.name << endl; - exit (-1); - } - } - else { - string cmd = "./scripts/eosio-tn_down.sh "; - bp::child c(cmd, - bp::env["EOSIO_HOME"] = host.eosio_home, - bp::env["EOSIO_NODE"] = node_num, - bp::env["EOSIO_TN_RESTART_CONFIG_DIR"] = node.config_dir_name ); - - if(!c.running()) { - cerr << "child not running after spawn " << cmd << endl; - for (int i = 0; i > 0; i++) { - if (c.running () ) break; - } - } - c.wait(); - } + string cmd = "./scripts/eosio-tn_down.sh "; + do_command(host, node.name, + { { "EOSIO_HOME", host.eosio_home }, { "EOSIO_NODE", node_num }, { "EOSIO_TN_RESTART_CONFIG_DIR", node.config_dir_name } }, + cmd); } } @@ -1723,28 +1704,8 @@ launcher_def::roll (const string& host_names) { for (string host_name: hosts) { cout << "Rolling " << host_name << endl; auto host = find_host_by_name_or_address(host_name); - if (!host->is_local()) { - string cmd = "cd " + host->eosio_home + "; " - + "export EOSIO_HOME=" + host->eosio_home + "; " - + "./scripts/eosio-tn_roll.sh"; - if (!do_ssh(cmd, host_name)) { - cerr << "Unable to roll " << host << endl; - exit (-1); - } - } - else { - string cmd = "./scripts/eosio-tn_roll.sh "; - bp::child c(cmd, - bp::env["EOSIO_HOME"] = host->eosio_home ); - - if(!c.running()) { - cerr << "child not running after spawn " << cmd << endl; - for (int i = 0; i > 0; i++) { - if (c.running () ) break; - } - } - c.wait(); - } + string cmd = "./scripts/eosio-tn_roll.sh "; + do_command(*host, host_name, { { "EOSIO_HOME", host->eosio_home } }, cmd); } } From 4a2eb1522358b23a6c98452eeff59c7150e9b3e8 Mon Sep 17 00:00:00 2001 From: Kayan Date: Thu, 30 Aug 2018 16:01:49 +0800 Subject: [PATCH 028/194] 5380 enhance cleos to support iteration over scopes & tables --- plugins/chain_api_plugin/chain_api_plugin.cpp | 1 + plugins/chain_plugin/chain_plugin.cpp | 43 +++++++++++++++++++ 
.../eosio/chain_plugin/chain_plugin.hpp | 25 +++++++++++ programs/cleos/httpc.hpp | 1 + programs/cleos/main.cpp | 17 ++++++++ 5 files changed, 87 insertions(+) diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp index 58098501f04..0b0328eaa00 100644 --- a/plugins/chain_api_plugin/chain_api_plugin.cpp +++ b/plugins/chain_api_plugin/chain_api_plugin.cpp @@ -88,6 +88,7 @@ void chain_api_plugin::plugin_startup() { CHAIN_RO_CALL(get_abi, 200), CHAIN_RO_CALL(get_raw_code_and_abi, 200), CHAIN_RO_CALL(get_table_rows, 200), + CHAIN_RO_CALL(get_table_by_scope, 200), CHAIN_RO_CALL(get_currency_balance, 200), CHAIN_RO_CALL(get_currency_stats, 200), CHAIN_RO_CALL(get_producers, 200), diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 67efb591611..d6f661325ab 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1104,6 +1104,49 @@ read_only::get_table_rows_result read_only::get_table_rows( const read_only::get } } +read_only::get_table_by_scope_result read_only::get_table_by_scope( const read_only::get_table_by_scope_params& p )const { + const auto& d = db.db(); + const auto& idx = d.get_index(); + decltype(idx.lower_bound(boost::make_tuple(0, 0, 0))) lower; + decltype(idx.upper_bound(boost::make_tuple(0, 0, 0))) upper; + + if (p.lower_bound.size()) { + uint64_t scope = convert_to_type(p.lower_bound, "lower_bound scope"); + lower = idx.lower_bound( boost::make_tuple(p.code, scope, p.table)); + } else { + lower = idx.lower_bound(boost::make_tuple(p.code, 0, p.table)); + } + if (p.upper_bound.size()) { + uint64_t scope = convert_to_type(p.upper_bound, "upper_bound scope"); + upper = idx.lower_bound( boost::make_tuple(p.code, scope, 0)); + } else { + upper = idx.lower_bound(boost::make_tuple((uint64_t)p.code + 1, 0, 0)); + } + + auto end = fc::time_point::now() + fc::microseconds(1000 * 10); /// 10ms max time + unsigned int count = 0; + 
auto itr = lower; + read_only::get_table_by_scope_result result; + for (; itr != upper; ++itr) { + if (p.table && itr->table != p.table) { + if (fc::time_point::now() > end) { + break; + } + continue; + } + get_table_by_scope_result_row row{itr->code, itr->scope, itr->table, itr->payer, itr->count}; + result.rows.emplace_back(fc::variant(row)); + if (++count == p.limit || fc::time_point::now() > end) { + ++itr; + break; + } + } + if (itr != upper) { + result.more = true; + } + return result; +} + vector read_only::get_currency_balance( const read_only::get_currency_balance_params& p )const { const abi_def abi = eosio::chain_apis::get_abi( db, p.code ); diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 2fd665d6255..390e292b269 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -250,6 +250,27 @@ class read_only { get_table_rows_result get_table_rows( const get_table_rows_params& params )const; + struct get_table_by_scope_params { + name code; // mandatory + name table = 0; // optional, act as filter + string lower_bound; // lower bound of scope, optional + string upper_bound; // upper bound of scope, optional + uint32_t limit = 10; + }; + struct get_table_by_scope_result_row { + name code; + name scope; + name table; + name payer; + uint32_t count; + }; + struct get_table_by_scope_result { + vector rows; ///< one row per item, either encoded as hex String or JSON object + bool more = false; ///< true if last element in data is not the end and sizeof data() < limit + }; + + get_table_by_scope_result get_table_by_scope( const get_table_by_scope_params& params )const; + struct get_currency_balance_params { name code; name account; @@ -624,6 +645,10 @@ FC_REFLECT( eosio::chain_apis::read_write::push_transaction_results, (transactio FC_REFLECT( 
eosio::chain_apis::read_only::get_table_rows_params, (json)(code)(scope)(table)(table_key)(lower_bound)(upper_bound)(limit)(key_type)(index_position)(encode_type) ) FC_REFLECT( eosio::chain_apis::read_only::get_table_rows_result, (rows)(more) ); +FC_REFLECT( eosio::chain_apis::read_only::get_table_by_scope_params, (code)(table)(lower_bound)(upper_bound)(limit) ) +FC_REFLECT( eosio::chain_apis::read_only::get_table_by_scope_result_row, (code)(scope)(table)(payer)(count)); +FC_REFLECT( eosio::chain_apis::read_only::get_table_by_scope_result, (rows)(more) ); + FC_REFLECT( eosio::chain_apis::read_only::get_currency_balance_params, (code)(account)(symbol)); FC_REFLECT( eosio::chain_apis::read_only::get_currency_stats_params, (code)(symbol)); FC_REFLECT( eosio::chain_apis::read_only::get_currency_stats_result, (supply)(max_supply)(issuer)); diff --git a/programs/cleos/httpc.hpp b/programs/cleos/httpc.hpp index 0fcd9b8490d..ec8dd59a14b 100644 --- a/programs/cleos/httpc.hpp +++ b/programs/cleos/httpc.hpp @@ -86,6 +86,7 @@ namespace eosio { namespace client { namespace http { const string get_block_header_state_func = chain_func_base + "/get_block_header_state"; const string get_account_func = chain_func_base + "/get_account"; const string get_table_func = chain_func_base + "/get_table_rows"; + const string get_table_by_scope_func = chain_func_base + "/get_table_by_scope"; const string get_code_func = chain_func_base + "/get_code"; const string get_abi_func = chain_func_base + "/get_abi"; const string get_raw_code_and_abi_func = chain_func_base + "/get_raw_code_and_abi"; diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 4ce7c2dbbd3..0589723cc86 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -2011,6 +2011,23 @@ int main( int argc, char** argv ) { << std::endl; }); + auto getScope = get->add_subcommand( "scope", localized("Retrieve a list of scopes and tables owned by a contract"), false); + getScope->add_option( "contract", code, 
localized("The contract who owns the table") )->required(); + getScope->add_option( "-t,--table", table, localized("The name of the table as filter") ); + getScope->add_option( "-l,--limit", limit, localized("The maximum number of rows to return") ); + getScope->add_option( "-L,--lower", lower, localized("lower bound of scope") ); + getScope->add_option( "-U,--upper", upper, localized("upper bound of scope") ); + getScope->set_callback([&] { + auto result = call(get_table_by_scope_func, fc::mutable_variant_object("code",code) + ("table",table) + ("lower_bound",lower) + ("upper_bound",upper) + ("limit",limit) + ); + std::cout << fc::json::to_pretty_string(result) + << std::endl; + }); + // currency accessors // get currency balance string symbol; From 9ac871b7125a33e814b5274d42ba95ad6887738a Mon Sep 17 00:00:00 2001 From: Paul Calabrese Date: Thu, 30 Aug 2018 08:36:31 -0500 Subject: [PATCH 029/194] Fix launcher test problems --- tests/Cluster.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/Cluster.py b/tests/Cluster.py index a3554551220..d146cceb2d3 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -748,6 +748,7 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, dontKill=False): eosioAccount.ownerPublicKey=eosioKeys["public"] eosioAccount.activePrivateKey=eosioKeys["private"] eosioAccount.activePublicKey=eosioKeys["public"] + producerKeys.pop(eosioName) if not walletMgr.importKey(eosioAccount, ignWallet): Utils.Print("ERROR: Failed to import %s account keys into ignition wallet." 
% (eosioName)) From 5ff59ab22f84c7d699d2afed2cd17bc56e20adca Mon Sep 17 00:00:00 2001 From: Paul Calabrese Date: Thu, 30 Aug 2018 14:01:55 -0500 Subject: [PATCH 030/194] Add more diagnostic info when bootstrapping fails --- tests/Cluster.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/Cluster.py b/tests/Cluster.py index d146cceb2d3..5f3d26d3d51 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -718,6 +718,7 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, dontKill=False): for line in bootFile: if p.search(line): Utils.Print("ERROR: bios_boot.sh script resulted in errors. See %s" % (bootlog)) + Utils.Print(line) return None producerKeys=Cluster.parseClusterKeys(totalNodes) From da096b20753d02f565b094f1aa256a5f7f3abd97 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Thu, 30 Aug 2018 15:32:03 -0400 Subject: [PATCH 031/194] Return something other then "fixme" in unix socket HTTP support This function is never called, but return something less sloppy --- .../http_plugin/include/eosio/http_plugin/local_endpoint.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp b/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp index c59f896c57e..4e549566c49 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp @@ -58,7 +58,7 @@ class local_connection : public lib::enable_shared_from_this { } std::string get_remote_endpoint(lib::error_code & ec) const { - return "fixme"; + return "UNIX Socket Endpoint"; } void pre_init(init_handler callback) { From 9a3a0c17917e82fd7b99897fc7af59b59964b0f9 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Thu, 30 Aug 2018 15:32:58 -0400 Subject: [PATCH 032/194] Treat unix-socket-path as string, not boost path Workaround quirk when 
filenames have spaces --- plugins/http_plugin/http_plugin.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index 2f344d82b87..ac70a7d3609 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -350,7 +350,7 @@ namespace eosio { my->mangle_option_names(); if(current_http_plugin_defaults.default_unix_socket_path.length()) cfg.add_options() - (my->unix_socket_path_option_name.c_str(), bpo::value()->default_value(current_http_plugin_defaults.default_unix_socket_path), + (my->unix_socket_path_option_name.c_str(), bpo::value()->default_value(current_http_plugin_defaults.default_unix_socket_path), "The filename (relative to data-dir) to create a unix socket for HTTP RPC; set blank to disable."); if(current_http_plugin_defaults.default_http_port) @@ -431,8 +431,8 @@ namespace eosio { } } - if( options.count( my->unix_socket_path_option_name ) && !options.at( my->unix_socket_path_option_name ).as().empty()) { - boost::filesystem::path sock_path = options.at(my->unix_socket_path_option_name).as(); + if( options.count( my->unix_socket_path_option_name ) && !options.at( my->unix_socket_path_option_name ).as().empty()) { + boost::filesystem::path sock_path = options.at(my->unix_socket_path_option_name).as(); if (sock_path.is_relative()) sock_path = app().data_dir() / sock_path; my->unix_endpoint = asio::local::stream_protocol::endpoint(sock_path.string()); From 8951cfa5cdddcaf569ae452f785ac2444e987471 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Thu, 30 Aug 2018 16:18:45 -0400 Subject: [PATCH 033/194] Remove umask shenanigans when creating unix HTTP socket Just leave this up to the environment --- .../http_plugin/include/eosio/http_plugin/local_endpoint.hpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp 
b/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp index 4e549566c49..cbe82bbfaf3 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp @@ -386,9 +386,7 @@ class local_endpoint : public config::socket_type { m_acceptor->open(ep.protocol(),bec); } if (!bec) { - mode_t old_mask = umask(S_IXUSR|S_IXGRP|S_IRWXO); m_acceptor->bind(ep,bec); - umask(old_mask); } if (!bec) { m_acceptor->listen(boost::asio::socket_base::max_listen_connections,bec); From b51ae6969b5faac593df478b61f61e36c44ced5d Mon Sep 17 00:00:00 2001 From: jjnetcn Date: Fri, 31 Aug 2018 09:10:26 +0800 Subject: [PATCH 034/194] cleos support deferred action add --delay-sec option for common action, the default is 0 sec (means no delay) you can set it for delayed transaction --- programs/cleos/main.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index ea9ba60eaf5..2359f40b495 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -167,6 +167,8 @@ bool print_response = false; uint8_t tx_max_cpu_usage = 0; uint32_t tx_max_net_usage = 0; +uint32_t delaysec = 0; + vector tx_permission; eosio::client::http::http_context context; @@ -197,6 +199,8 @@ void add_standard_transaction_options(CLI::App* cmd, string default_permission = cmd->add_option("--max-cpu-usage-ms", tx_max_cpu_usage, localized("set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit)")); cmd->add_option("--max-net-usage", tx_max_net_usage, localized("set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit)")); + + cmd->add_option("--delay-sec", delaysec, localized("set the delay_sec seconds, defaults to 0s")); } vector get_account_permissions(const vector& permissions) { @@ -297,6 +301,7 @@ fc::variant push_transaction( 
signed_transaction& trx, int32_t extra_kcpu = 1000 trx.max_cpu_usage_ms = tx_max_cpu_usage; trx.max_net_usage_words = (tx_max_net_usage + 7)/8; + trx.delay_sec = delaysec; } if (!tx_skip_sign) { From 941d0a75b7f1947f9115e527f92d2c289cd17342 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 20 Aug 2018 14:42:22 -0500 Subject: [PATCH 035/194] Add block_num & block_time to action_trace --- libraries/chain/apply_context.cpp | 2 ++ libraries/chain/include/eosio/chain/trace.hpp | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 0bbfb8e3aec..04e29407375 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -76,6 +76,8 @@ action_trace apply_context::exec_one() action_trace t(r); t.trx_id = trx_context.id; + t.block_num = control.pending_block_state()->block_num; + t.block_time = control.pending_block_time(); t.act = act; t.console = _pending_console_output.str(); diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 41fb8f079a6..8061ae0fc66 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -22,6 +22,9 @@ namespace eosio { namespace chain { uint64_t total_cpu_usage = 0; /// total of inline_traces[x].cpu_usage + cpu_usage transaction_id_type trx_id; ///< the transaction that generated this action + uint32_t block_num = 0; + block_timestamp_type block_time; + }; struct action_trace : public base_action_trace { @@ -49,7 +52,7 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain FC_REFLECT( eosio::chain::base_action_trace, - (receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id) ) + (receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id)(block_num)(block_time) ) FC_REFLECT_DERIVED( eosio::chain::action_trace, (eosio::chain::base_action_trace), (inline_traces) ) From 
f3b0ee9dec87205da99d5b07585450717d4720fc Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 20 Aug 2018 14:43:22 -0500 Subject: [PATCH 036/194] Add irreversible flag to block_state --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 25 +++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index c4d11e1bdee..ca2304e48f1 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -279,7 +279,7 @@ void mongo_db_plugin_impl::applied_transaction( const chain::transaction_trace_p void mongo_db_plugin_impl::applied_irreversible_block( const chain::block_state_ptr& bs ) { try { - if( store_blocks || store_transactions ) { + if( store_blocks || store_block_states || store_transactions ) { queue( irreversible_block_state_queue, bs ); } } catch (fc::exception& e) { @@ -913,17 +913,15 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ using bsoncxx::builder::basic::make_document; using bsoncxx::builder::basic::kvp; - auto blocks = mongo_conn[db_name][blocks_col]; - auto trans = mongo_conn[db_name][trans_col]; const auto block_id = bs->block->id(); const auto block_id_str = block_id.str(); - const auto block_num = bs->block->block_num(); auto now = std::chrono::duration_cast( std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()}); if( store_blocks ) { + auto blocks = mongo_conn[db_name][blocks_col]; auto ir_block = find_block( blocks, block_id_str ); if( !ir_block ) { _process_accepted_block( bs ); @@ -939,7 +937,26 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ blocks.update_one( make_document( kvp( "_id", ir_block->view()["_id"].get_oid() ) ), update_doc.view() ); } + if( store_block_states ) { + auto block_states = mongo_conn[db_name][block_states_col]; + auto ir_block = find_block( block_states, block_id_str ); + 
if( !ir_block ) { + _process_accepted_block( bs ); + ir_block = find_block( block_states, block_id_str ); + if( !ir_block ) return; // should never happen + } + + auto update_doc = make_document( kvp( "$set", make_document( kvp( "irreversible", b_bool{true} ), + kvp( "validated", b_bool{bs->validated} ), + kvp( "in_current_chain", b_bool{bs->in_current_chain} ), + kvp( "updatedAt", b_date{now} ) ) ) ); + + block_states.update_one( make_document( kvp( "_id", ir_block->view()["_id"].get_oid() ) ), update_doc.view() ); + } + if( store_transactions ) { + const auto block_num = bs->block->block_num(); + auto trans = mongo_conn[db_name][trans_col]; bool transactions_in_block = false; mongocxx::options::bulk_write bulk_opts; bulk_opts.ordered( false ); From 3ad6e2e86ee89bfcaa604f2e14e0b36553ae9526 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 21 Aug 2018 20:17:38 -0500 Subject: [PATCH 037/194] Add transaction_trace status to each action_trace --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index ca2304e48f1..320ecfc3471 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -91,6 +91,7 @@ class mongo_db_plugin_impl { void purge_abi_cache(); bool add_action_trace( mongocxx::bulk_write& bulk_action_traces, const chain::action_trace& atrace, + const chain::transaction_trace_ptr& t, bool executed, const std::chrono::milliseconds& now ); void update_account(const chain::action& act); @@ -704,6 +705,7 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti bool mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& bulk_action_traces, const chain::action_trace& atrace, + const chain::transaction_trace_ptr& t, bool executed, const std::chrono::milliseconds& now ) { using namespace bsoncxx::types; @@ -734,6 +736,9 @@ 
mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& bulk_action_traces elog( " JSON: ${j}", ("j", json) ); } } + if( t->receipt.valid() ) { + action_traces_doc.append( kvp( "trx_status", std::string( t->receipt->status ) ) ); + } action_traces_doc.append( kvp( "createdAt", b_date{now} ) ); mongocxx::model::insert_one insert_op{action_traces_doc.view()}; @@ -742,7 +747,7 @@ mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& bulk_action_traces } for( const auto& iline_atrace : atrace.inline_traces ) { - added |= add_action_trace( bulk_action_traces, iline_atrace, executed, now ); + added |= add_action_trace( bulk_action_traces, iline_atrace, t, executed, now ); } return added; @@ -768,7 +773,7 @@ void mongo_db_plugin_impl::_process_applied_transaction( const chain::transactio for( const auto& atrace : t->action_traces ) { try { - write_atraces |= add_action_trace( bulk_action_traces, atrace, executed, now ); + write_atraces |= add_action_trace( bulk_action_traces, atrace, t, executed, now ); } catch(...) 
{ handle_mongo_exception("add action traces", __LINE__); } From afcfc5eda4f75f723f094aad00b824dcd00c8f90 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 21 Aug 2018 20:18:10 -0500 Subject: [PATCH 038/194] Add ram_delta to action_trace --- libraries/chain/apply_context.cpp | 2 ++ libraries/chain/include/eosio/chain/trace.hpp | 4 ++-- libraries/chain/include/eosio/chain/transaction_context.hpp | 1 + libraries/chain/transaction_context.cpp | 1 + 4 files changed, 6 insertions(+), 2 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 04e29407375..d8385cfa68b 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -32,6 +32,7 @@ static inline void print_debug(account_name receiver, const action_trace& ar) { action_trace apply_context::exec_one() { auto start = fc::time_point::now(); + int64_t ram_delta = trx_context.trx_ram_delta; const auto& cfg = control.get_global_properties().configuration; try { @@ -78,6 +79,7 @@ action_trace apply_context::exec_one() t.trx_id = trx_context.id; t.block_num = control.pending_block_state()->block_num; t.block_time = control.pending_block_time(); + t.ram_delta = trx_context.trx_ram_delta - ram_delta; t.act = act; t.console = _pending_console_output.str(); diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 8061ae0fc66..2da08e4721e 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -24,7 +24,7 @@ namespace eosio { namespace chain { transaction_id_type trx_id; ///< the transaction that generated this action uint32_t block_num = 0; block_timestamp_type block_time; - + int64_t ram_delta = 0; }; struct action_trace : public base_action_trace { @@ -52,7 +52,7 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain FC_REFLECT( eosio::chain::base_action_trace, - 
(receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id)(block_num)(block_time) ) + (receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id)(block_num)(block_time)(ram_delta) ) FC_REFLECT_DERIVED( eosio::chain::action_trace, (eosio::chain::base_action_trace), (inline_traces) ) diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index 22e8eae36d6..b5811c1a66d 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -87,6 +87,7 @@ namespace eosio { namespace chain { fc::microseconds leeway = fc::microseconds(3000); int64_t billed_cpu_time_us = 0; bool explicit_billed_cpu_time = false; + int64_t trx_ram_delta = 0; private: bool is_initialized = false; diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 64683ebc049..d753bb7ad8a 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -383,6 +383,7 @@ namespace eosio { namespace chain { if( ram_delta > 0 ) { validate_ram_usage.insert( account ); } + trx_ram_delta += ram_delta; } uint32_t transaction_context::update_billed_cpu_time( fc::time_point now ) { From 6a2cf67a45f3ef718dfb306611ffdda80c8cd718 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 22 Aug 2018 08:30:40 -0500 Subject: [PATCH 039/194] Add ram_delta per account to action_trace --- libraries/chain/apply_context.cpp | 4 ++-- libraries/chain/include/eosio/chain/trace.hpp | 4 ++-- libraries/chain/include/eosio/chain/transaction_context.hpp | 2 +- libraries/chain/transaction_context.cpp | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index d8385cfa68b..9ea13ce8b51 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -32,7 +32,6 @@ static inline void 
print_debug(account_name receiver, const action_trace& ar) { action_trace apply_context::exec_one() { auto start = fc::time_point::now(); - int64_t ram_delta = trx_context.trx_ram_delta; const auto& cfg = control.get_global_properties().configuration; try { @@ -79,7 +78,8 @@ action_trace apply_context::exec_one() t.trx_id = trx_context.id; t.block_num = control.pending_block_state()->block_num; t.block_time = control.pending_block_time(); - t.ram_delta = trx_context.trx_ram_delta - ram_delta; + t.account_ram_delta = std::move( trx_context.account_ram_delta ); + trx_context.account_ram_delta.clear(); t.act = act; t.console = _pending_console_output.str(); diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 2da08e4721e..1b390ac8911 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -24,7 +24,7 @@ namespace eosio { namespace chain { transaction_id_type trx_id; ///< the transaction that generated this action uint32_t block_num = 0; block_timestamp_type block_time; - int64_t ram_delta = 0; + flat_map account_ram_delta; }; struct action_trace : public base_action_trace { @@ -52,7 +52,7 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain FC_REFLECT( eosio::chain::base_action_trace, - (receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id)(block_num)(block_time)(ram_delta) ) + (receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id)(block_num)(block_time)(account_ram_delta) ) FC_REFLECT_DERIVED( eosio::chain::action_trace, (eosio::chain::base_action_trace), (inline_traces) ) diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index b5811c1a66d..38857e98aeb 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -74,6 +74,7 @@ namespace eosio { 
namespace chain { vector executed; flat_set bill_to_accounts; flat_set validate_ram_usage; + flat_map account_ram_delta; // reset for each action /// the maximum number of virtual CPU instructions of the transaction that can be safely billed to the billable accounts uint64_t initial_max_billable_cpu = 0; @@ -87,7 +88,6 @@ namespace eosio { namespace chain { fc::microseconds leeway = fc::microseconds(3000); int64_t billed_cpu_time_us = 0; bool explicit_billed_cpu_time = false; - int64_t trx_ram_delta = 0; private: bool is_initialized = false; diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index d753bb7ad8a..44855b37530 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -383,7 +383,7 @@ namespace eosio { namespace chain { if( ram_delta > 0 ) { validate_ram_usage.insert( account ); } - trx_ram_delta += ram_delta; + account_ram_delta[account] += ram_delta; } uint32_t transaction_context::update_billed_cpu_time( fc::time_point now ) { From b01fc32877a2f68bb06479055771d37bdf0bd218 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 22 Aug 2018 12:49:13 -0500 Subject: [PATCH 040/194] Add producer_block_id to action_trace --- libraries/chain/apply_context.cpp | 1 + libraries/chain/controller.cpp | 19 ++++++++++++++----- .../chain/include/eosio/chain/controller.hpp | 1 + libraries/chain/include/eosio/chain/trace.hpp | 4 +++- 4 files changed, 19 insertions(+), 6 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 9ea13ce8b51..551b6626cb8 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -78,6 +78,7 @@ action_trace apply_context::exec_one() t.trx_id = trx_context.id; t.block_num = control.pending_block_state()->block_num; t.block_time = control.pending_block_time(); + t.producer_block_id = control.pending_producer_block_id(); t.account_ram_delta = std::move( 
trx_context.account_ram_delta ); trx_context.account_ram_delta.clear(); t.act = act; diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 26d54d7da70..027c78089ab 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -83,6 +83,8 @@ struct pending_state { controller::block_status _block_status = controller::block_status::incomplete; + block_id_type _producer_block_id; + void push() { _db_session.push(); } @@ -868,7 +870,7 @@ struct controller_impl { } /// push_transaction - void start_block( block_timestamp_type when, uint16_t confirm_block_count, controller::block_status s ) { + void start_block( block_timestamp_type when, uint16_t confirm_block_count, controller::block_status s, const block_id_type& producer_block_id ) { EOS_ASSERT( !pending, block_validate_exception, "pending block already exists" ); auto guard_pending = fc::make_scoped_exit([this](){ @@ -885,6 +887,7 @@ struct controller_impl { } pending->_block_status = s; + pending->_producer_block_id = producer_block_id; pending->_pending_block_state = std::make_shared( *head, when ); // promotes pending schedule (if any) to active pending->_pending_block_state->in_current_chain = true; @@ -953,7 +956,8 @@ struct controller_impl { void apply_block( const signed_block_ptr& b, controller::block_status s ) { try { try { EOS_ASSERT( b->block_extensions.size() == 0, block_validate_exception, "no supported extensions" ); - start_block( b->timestamp, b->confirmed, s ); + auto producer_block_id = b->id(); + start_block( b->timestamp, b->confirmed, s , producer_block_id); transaction_trace_ptr trace; @@ -993,9 +997,9 @@ struct controller_impl { finalize_block(); // this implicitly asserts that all header fields (less the signature) are identical - EOS_ASSERT(b->id() == pending->_pending_block_state->header.id(), + EOS_ASSERT(producer_block_id == pending->_pending_block_state->header.id(), block_validate_exception, "Block ID does not match", - 
("producer_block_id",b->id())("validator_block_id",pending->_pending_block_state->header.id())); + ("producer_block_id",producer_block_id)("validator_block_id",pending->_pending_block_state->header.id())); // We need to fill out the pending block state's block because that gets serialized in the reversible block log // in the future we can optimize this by serializing the original and not the copy @@ -1389,7 +1393,7 @@ fork_database& controller::fork_db()const { return my->fork_db; } void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count) { validate_db_available_size(); - my->start_block(when, confirm_block_count, block_status::incomplete ); + my->start_block(when, confirm_block_count, block_status::incomplete, block_id_type() ); } void controller::finalize_block() { @@ -1521,6 +1525,11 @@ time_point controller::pending_block_time()const { return my->pending->_pending_block_state->header.timestamp; } +block_id_type controller::pending_producer_block_id()const { + EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); + return my->pending->_producer_block_id; +} + uint32_t controller::last_irreversible_block_num() const { return std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum); } diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 21b9e5a6f9f..85adeffd798 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -184,6 +184,7 @@ namespace eosio { namespace chain { time_point pending_block_time()const; block_state_ptr pending_block_state()const; + block_id_type pending_producer_block_id()const; const producer_schedule_type& active_producers()const; const producer_schedule_type& pending_producers()const; diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 1b390ac8911..6b5dd202574 100644 --- 
a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -24,6 +24,7 @@ namespace eosio { namespace chain { transaction_id_type trx_id; ///< the transaction that generated this action uint32_t block_num = 0; block_timestamp_type block_time; + block_id_type producer_block_id; flat_map account_ram_delta; }; @@ -52,7 +53,8 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain FC_REFLECT( eosio::chain::base_action_trace, - (receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id)(block_num)(block_time)(account_ram_delta) ) + (receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id) + (block_num)(block_time)(producer_block_id)(account_ram_delta) ) FC_REFLECT_DERIVED( eosio::chain::action_trace, (eosio::chain::base_action_trace), (inline_traces) ) From 9b54e195490cd0d7f4feebefc227b44e316b0337 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 22 Aug 2018 13:46:46 -0500 Subject: [PATCH 041/194] Add block_num, block_time, producer_block_id to transaction_trace --- libraries/chain/controller.cpp | 3 +++ libraries/chain/include/eosio/chain/trace.hpp | 6 +++++- libraries/chain/transaction_context.cpp | 3 +++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 027c78089ab..fd94e09c244 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -631,6 +631,9 @@ struct controller_impl { if( gtrx.expiration < self.pending_block_time() ) { trace = std::make_shared(); trace->id = gtrx.trx_id; + trace->block_num = self.pending_block_state()->block_num; + trace->block_time = self.pending_block_time(); + trace->producer_block_id = self.pending_producer_block_id(); trace->scheduled = true; trace->receipt = push_receipt( gtrx.trx_id, transaction_receipt::expired, billed_cpu_time_us, 0 ); // expire the transaction emit( self.accepted_transaction, trx ); diff --git 
a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 6b5dd202574..912d4d730ba 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -39,6 +39,9 @@ namespace eosio { namespace chain { struct transaction_trace { transaction_id_type id; + uint32_t block_num = 0; + block_timestamp_type block_time; + block_id_type producer_block_id; fc::optional receipt; fc::microseconds elapsed; uint64_t net_usage = 0; @@ -59,5 +62,6 @@ FC_REFLECT( eosio::chain::base_action_trace, FC_REFLECT_DERIVED( eosio::chain::action_trace, (eosio::chain::base_action_trace), (inline_traces) ) -FC_REFLECT( eosio::chain::transaction_trace, (id)(receipt)(elapsed)(net_usage)(scheduled) +FC_REFLECT( eosio::chain::transaction_trace, (id)(block_num)(block_time)(producer_block_id) + (receipt)(elapsed)(net_usage)(scheduled) (action_traces)(failed_dtrx_trace)(except) ) diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 44855b37530..a657109a078 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -26,6 +26,9 @@ namespace eosio { namespace chain { undo_session = c.db().start_undo_session(true); } trace->id = id; + trace->block_num = c.pending_block_state()->block_num; + trace->block_time = c.pending_block_time(); + trace->producer_block_id = c.pending_producer_block_id(); executed.reserve( trx.total_actions() ); EOS_ASSERT( trx.transaction_extensions.size() == 0, unsupported_feature, "we don't support any extensions yet" ); } From e7916ed985567932bb383d489757acb1d2dced10 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 21 Aug 2018 20:18:10 -0500 Subject: [PATCH 042/194] Add ram_delta to action_trace --- libraries/chain/apply_context.cpp | 1 + libraries/chain/include/eosio/chain/transaction_context.hpp | 1 + 2 files changed, 2 insertions(+) diff --git a/libraries/chain/apply_context.cpp 
b/libraries/chain/apply_context.cpp index 551b6626cb8..f3b5f1913a2 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -32,6 +32,7 @@ static inline void print_debug(account_name receiver, const action_trace& ar) { action_trace apply_context::exec_one() { auto start = fc::time_point::now(); + int64_t ram_delta = trx_context.trx_ram_delta; const auto& cfg = control.get_global_properties().configuration; try { diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index 38857e98aeb..c9917954f13 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -88,6 +88,7 @@ namespace eosio { namespace chain { fc::microseconds leeway = fc::microseconds(3000); int64_t billed_cpu_time_us = 0; bool explicit_billed_cpu_time = false; + int64_t trx_ram_delta = 0; private: bool is_initialized = false; From fb75d0168b31ad849b4f35bf75b2fd789729bcc8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 22 Aug 2018 08:30:40 -0500 Subject: [PATCH 043/194] Add ram_delta per account to action_trace --- libraries/chain/apply_context.cpp | 1 - libraries/chain/include/eosio/chain/transaction_context.hpp | 1 - 2 files changed, 2 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index f3b5f1913a2..551b6626cb8 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -32,7 +32,6 @@ static inline void print_debug(account_name receiver, const action_trace& ar) { action_trace apply_context::exec_one() { auto start = fc::time_point::now(); - int64_t ram_delta = trx_context.trx_ram_delta; const auto& cfg = control.get_global_properties().configuration; try { diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index c9917954f13..38857e98aeb 
100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -88,7 +88,6 @@ namespace eosio { namespace chain { fc::microseconds leeway = fc::microseconds(3000); int64_t billed_cpu_time_us = 0; bool explicit_billed_cpu_time = false; - int64_t trx_ram_delta = 0; private: bool is_initialized = false; From 0deebd22e8577eee7770017b2b88c7133aa491c3 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 21 Aug 2018 20:18:10 -0500 Subject: [PATCH 044/194] Add ram_delta to action_trace --- libraries/chain/apply_context.cpp | 1 + libraries/chain/include/eosio/chain/transaction_context.hpp | 1 + 2 files changed, 2 insertions(+) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 551b6626cb8..f3b5f1913a2 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -32,6 +32,7 @@ static inline void print_debug(account_name receiver, const action_trace& ar) { action_trace apply_context::exec_one() { auto start = fc::time_point::now(); + int64_t ram_delta = trx_context.trx_ram_delta; const auto& cfg = control.get_global_properties().configuration; try { diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index 38857e98aeb..c9917954f13 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -88,6 +88,7 @@ namespace eosio { namespace chain { fc::microseconds leeway = fc::microseconds(3000); int64_t billed_cpu_time_us = 0; bool explicit_billed_cpu_time = false; + int64_t trx_ram_delta = 0; private: bool is_initialized = false; From 8064d9c2fc777fe25fbae4db26fd40ef5a40e3d2 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 22 Aug 2018 08:30:40 -0500 Subject: [PATCH 045/194] Add ram_delta per account to action_trace --- libraries/chain/apply_context.cpp | 1 - 
libraries/chain/include/eosio/chain/transaction_context.hpp | 1 - 2 files changed, 2 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index f3b5f1913a2..551b6626cb8 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -32,7 +32,6 @@ static inline void print_debug(account_name receiver, const action_trace& ar) { action_trace apply_context::exec_one() { auto start = fc::time_point::now(); - int64_t ram_delta = trx_context.trx_ram_delta; const auto& cfg = control.get_global_properties().configuration; try { diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index c9917954f13..38857e98aeb 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -88,7 +88,6 @@ namespace eosio { namespace chain { fc::microseconds leeway = fc::microseconds(3000); int64_t billed_cpu_time_us = 0; bool explicit_billed_cpu_time = false; - int64_t trx_ram_delta = 0; private: bool is_initialized = false; From ef06bcf08396b9fb8d94c9418694e6063c6ea2ee Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 30 Aug 2018 11:52:03 -0500 Subject: [PATCH 046/194] Make producer_block_id optional. Don't set for incomplete block (speclutive). 
--- libraries/chain/controller.cpp | 12 ++++++++---- libraries/chain/include/eosio/chain/controller.hpp | 6 +++--- libraries/chain/include/eosio/chain/trace.hpp | 4 ++-- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index fd94e09c244..0b6dbf59e20 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -83,7 +83,7 @@ struct pending_state { controller::block_status _block_status = controller::block_status::incomplete; - block_id_type _producer_block_id; + optional _producer_block_id; void push() { _db_session.push(); @@ -873,7 +873,9 @@ struct controller_impl { } /// push_transaction - void start_block( block_timestamp_type when, uint16_t confirm_block_count, controller::block_status s, const block_id_type& producer_block_id ) { + void start_block( block_timestamp_type when, uint16_t confirm_block_count, controller::block_status s, + const optional& producer_block_id ) + { EOS_ASSERT( !pending, block_validate_exception, "pending block already exists" ); auto guard_pending = fc::make_scoped_exit([this](){ @@ -1396,7 +1398,7 @@ fork_database& controller::fork_db()const { return my->fork_db; } void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count) { validate_db_available_size(); - my->start_block(when, confirm_block_count, block_status::incomplete, block_id_type() ); + my->start_block(when, confirm_block_count, block_status::incomplete, optional() ); } void controller::finalize_block() { @@ -1528,8 +1530,10 @@ time_point controller::pending_block_time()const { return my->pending->_pending_block_state->header.timestamp; } -block_id_type controller::pending_producer_block_id()const { +optional controller::pending_producer_block_id()const { EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); + if( my->pending->_block_status == block_status::incomplete ) + return optional(); return my->pending->_producer_block_id; 
} diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 85adeffd798..8747d722b20 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -182,9 +182,9 @@ namespace eosio { namespace chain { time_point fork_db_head_block_time()const; account_name fork_db_head_block_producer()const; - time_point pending_block_time()const; - block_state_ptr pending_block_state()const; - block_id_type pending_producer_block_id()const; + time_point pending_block_time()const; + block_state_ptr pending_block_state()const; + optional pending_producer_block_id()const; const producer_schedule_type& active_producers()const; const producer_schedule_type& pending_producers()const; diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 912d4d730ba..aadcd47947f 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -24,7 +24,7 @@ namespace eosio { namespace chain { transaction_id_type trx_id; ///< the transaction that generated this action uint32_t block_num = 0; block_timestamp_type block_time; - block_id_type producer_block_id; + fc::optional producer_block_id; flat_map account_ram_delta; }; @@ -41,7 +41,7 @@ namespace eosio { namespace chain { transaction_id_type id; uint32_t block_num = 0; block_timestamp_type block_time; - block_id_type producer_block_id; + fc::optional producer_block_id; fc::optional receipt; fc::microseconds elapsed; uint64_t net_usage = 0; From 7c86a1c4c582e5980fe9ee2c1c7928d85a950fab Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 30 Aug 2018 12:22:46 -0500 Subject: [PATCH 047/194] Don't log traces with no producer_block_id --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp 
b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 320ecfc3471..8385e35c6e4 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -267,6 +267,25 @@ void mongo_db_plugin_impl::accepted_transaction( const chain::transaction_metada void mongo_db_plugin_impl::applied_transaction( const chain::transaction_trace_ptr& t ) { try { + // Traces emitted from an incomplete block leave the producer_block_id as empty. + // + // Avoid adding the action traces or transaction traces to the database if the producer_block_id is empty. + // This way traces from speculatively executed transactions are not included in the Mongo database which can + // avoid potential confusion for consumers of that database. + // + // Due to forks, it could be possible for multiple incompatible action traces with the same block_num and trx_id + // to exist in the database. And if the producer double produces a block, even the block_time may not + // disambiguate the two action traces. Without a producer_block_id to disambiguate and determine if the action + // trace comes from an orphaned fork branching off of the blockchain, consumers of the Mongo DB database may be + // reacting to a stale action trace that never actually executed in the current blockchain. + // + // It is better to avoid this potential confusion by not logging traces from speculative execution, i.e. emitted + // from an incomplete block. This means that traces will not be recorded in speculative read-mode, but + // users should not be using the mongo_db_plugin in that mode anyway. + // + // It is recommended to run mongo_db_plugin in read-mode = read-only. 
+ // + if( !t->producer_block_id.valid() ) return; // always queue since account information always gathered queue( transaction_trace_queue, t ); } catch (fc::exception& e) { From 6aa12dc159bdc434dd2a92ff143f46013babc503 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 30 Aug 2018 20:27:30 -0500 Subject: [PATCH 048/194] Retry get account in case not pushed to mongo yet --- tests/Node.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/Node.py b/tests/Node.py index a66b5e03fa3..80f15c56665 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -459,7 +459,12 @@ def getEosAccountFromDb(self, name, exitOnError=False): subcommand='db.accounts.findOne({"name" : "%s"})' % (name) if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd)) try: - trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand, exitOnError=exitOnError) + timeout = 3 + for i in range(0,(int(60/timeout) - 1)): + trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand, exitOnError=exitOnError) + if trans is not None: + return trans + time.sleep(timeout) return trans except subprocess.CalledProcessError as ex: msg=ex.output.decode("utf-8") From 7daa2bcaa15c0813ff011d041e069d6b3f08c252 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 30 Aug 2018 20:29:13 -0500 Subject: [PATCH 049/194] In work - producer_block_id null when producing. --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 8385e35c6e4..7270a825e84 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -285,7 +285,12 @@ void mongo_db_plugin_impl::applied_transaction( const chain::transaction_trace_p // // It is recommended to run mongo_db_plugin in read-mode = read-only. 
// - if( !t->producer_block_id.valid() ) return; +// if( !t->producer_block_id.valid() ) { +// auto v = to_variant_with_abi( *t ); +// ilog("==>${t}", ("t", fc::json::to_string( v ))); +// +// return; +// } // always queue since account information always gathered queue( transaction_trace_queue, t ); } catch (fc::exception& e) { From 77d4ea19e7568564c8438abfd1fea6a44703e47e Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Thu, 30 Aug 2018 22:18:46 -0400 Subject: [PATCH 050/194] remove dead comment --- .../http_plugin/include/eosio/http_plugin/local_endpoint.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp b/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp index cbe82bbfaf3..4664d1d378a 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/local_endpoint.hpp @@ -396,7 +396,7 @@ class local_endpoint : public config::socket_type { m_acceptor->close(); } log_err(log::elevel::info,"asio listen",bec); - ec = bec;//make_error_code(error::pass_through); + ec = bec; } else { m_state = LISTENING; ec = lib::error_code(); From a9e84fdaff8447353821340c1fda5f1bee1d847a Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 31 Aug 2018 17:50:54 -0400 Subject: [PATCH 051/194] Add wabt to help message for wasm-runtime option --- plugins/chain_plugin/chain_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 58295d52da8..c00cbc8d08e 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -212,7 +212,7 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip ("blocks-dir", bpo::value()->default_value("blocks"), "the location of the 
blocks directory (absolute path or relative to application data dir)") ("checkpoint", bpo::value>()->composing(), "Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.") - ("wasm-runtime", bpo::value()->value_name("wavm/binaryen"), "Override default WASM runtime") + ("wasm-runtime", bpo::value()->value_name("wavm/binaryen/wabt"), "Override default WASM runtime") ("abi-serializer-max-time-ms", bpo::value()->default_value(config::default_abi_serializer_max_time_ms), "Override default maximum ABI serialization time allowed in ms") ("chain-state-db-size-mb", bpo::value()->default_value(config::default_state_size / (1024 * 1024)), "Maximum size (in MiB) of the chain state database") From 1ded5cb42d360c806af268083791943135dba8a9 Mon Sep 17 00:00:00 2001 From: Jeeyong Um Date: Sun, 2 Sep 2018 12:30:32 +0900 Subject: [PATCH 052/194] Print separators properly with `cleos get account` --- programs/cleos/main.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index d7d9d6026e9..43e7bac18e3 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -1487,14 +1487,14 @@ void get_account( const string& accountName, bool json_format ) { std::function dfs_print = [&]( account_name name, int depth ) -> void { auto& p = cache.at(name); std::cout << indent << std::string(depth*3, ' ') << name << ' ' << std::setw(5) << p.required_auth.threshold << ": "; + const char *sep = ""; for ( auto it = p.required_auth.keys.begin(); it != p.required_auth.keys.end(); ++it ) { - if ( it != p.required_auth.keys.begin() ) { - std::cout << ", "; - } - std::cout << it->weight << ' ' << string(it->key); + std::cout << sep << it->weight << ' ' << string(it->key); + sep = ", "; } for ( auto& acc : p.required_auth.accounts ) { - std::cout << acc.weight << ' ' << string(acc.permission.actor) << '@' << string(acc.permission.permission) << ", "; + std::cout << sep << acc.weight << ' ' << 
string(acc.permission.actor) << '@' << string(acc.permission.permission); + sep = ", "; } std::cout << std::endl; auto it = tree.find( name ); From b9bf617bbbc6edd35a74b63943f16f9e72fb9a95 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 20 Aug 2018 14:42:22 -0500 Subject: [PATCH 053/194] Add block_num & block_time to action_trace --- libraries/chain/apply_context.cpp | 2 ++ libraries/chain/include/eosio/chain/trace.hpp | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 0bbfb8e3aec..04e29407375 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -76,6 +76,8 @@ action_trace apply_context::exec_one() action_trace t(r); t.trx_id = trx_context.id; + t.block_num = control.pending_block_state()->block_num; + t.block_time = control.pending_block_time(); t.act = act; t.console = _pending_console_output.str(); diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 41fb8f079a6..8061ae0fc66 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -22,6 +22,9 @@ namespace eosio { namespace chain { uint64_t total_cpu_usage = 0; /// total of inline_traces[x].cpu_usage + cpu_usage transaction_id_type trx_id; ///< the transaction that generated this action + uint32_t block_num = 0; + block_timestamp_type block_time; + }; struct action_trace : public base_action_trace { @@ -49,7 +52,7 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain FC_REFLECT( eosio::chain::base_action_trace, - (receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id) ) + (receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id)(block_num)(block_time) ) FC_REFLECT_DERIVED( eosio::chain::action_trace, (eosio::chain::base_action_trace), (inline_traces) ) From f6cce1f4a71df7ccd645cc750c36d21113664df3 Mon Sep 17 00:00:00 
2001 From: Kevin Heifner Date: Mon, 20 Aug 2018 14:43:22 -0500 Subject: [PATCH 054/194] Add irreversible flag to block_state --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 25 +++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index c4d11e1bdee..ca2304e48f1 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -279,7 +279,7 @@ void mongo_db_plugin_impl::applied_transaction( const chain::transaction_trace_p void mongo_db_plugin_impl::applied_irreversible_block( const chain::block_state_ptr& bs ) { try { - if( store_blocks || store_transactions ) { + if( store_blocks || store_block_states || store_transactions ) { queue( irreversible_block_state_queue, bs ); } } catch (fc::exception& e) { @@ -913,17 +913,15 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ using bsoncxx::builder::basic::make_document; using bsoncxx::builder::basic::kvp; - auto blocks = mongo_conn[db_name][blocks_col]; - auto trans = mongo_conn[db_name][trans_col]; const auto block_id = bs->block->id(); const auto block_id_str = block_id.str(); - const auto block_num = bs->block->block_num(); auto now = std::chrono::duration_cast( std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()}); if( store_blocks ) { + auto blocks = mongo_conn[db_name][blocks_col]; auto ir_block = find_block( blocks, block_id_str ); if( !ir_block ) { _process_accepted_block( bs ); @@ -939,7 +937,26 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ blocks.update_one( make_document( kvp( "_id", ir_block->view()["_id"].get_oid() ) ), update_doc.view() ); } + if( store_block_states ) { + auto block_states = mongo_conn[db_name][block_states_col]; + auto ir_block = find_block( block_states, block_id_str ); + if( !ir_block ) { + _process_accepted_block( bs ); + ir_block 
= find_block( block_states, block_id_str ); + if( !ir_block ) return; // should never happen + } + + auto update_doc = make_document( kvp( "$set", make_document( kvp( "irreversible", b_bool{true} ), + kvp( "validated", b_bool{bs->validated} ), + kvp( "in_current_chain", b_bool{bs->in_current_chain} ), + kvp( "updatedAt", b_date{now} ) ) ) ); + + block_states.update_one( make_document( kvp( "_id", ir_block->view()["_id"].get_oid() ) ), update_doc.view() ); + } + if( store_transactions ) { + const auto block_num = bs->block->block_num(); + auto trans = mongo_conn[db_name][trans_col]; bool transactions_in_block = false; mongocxx::options::bulk_write bulk_opts; bulk_opts.ordered( false ); From 484fd8e7449e8d71dea268ff719a4ff9472b3a3b Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 21 Aug 2018 20:17:38 -0500 Subject: [PATCH 055/194] Add transaction_trace status to each action_trace --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index ca2304e48f1..320ecfc3471 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -91,6 +91,7 @@ class mongo_db_plugin_impl { void purge_abi_cache(); bool add_action_trace( mongocxx::bulk_write& bulk_action_traces, const chain::action_trace& atrace, + const chain::transaction_trace_ptr& t, bool executed, const std::chrono::milliseconds& now ); void update_account(const chain::action& act); @@ -704,6 +705,7 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti bool mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& bulk_action_traces, const chain::action_trace& atrace, + const chain::transaction_trace_ptr& t, bool executed, const std::chrono::milliseconds& now ) { using namespace bsoncxx::types; @@ -734,6 +736,9 @@ mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& 
bulk_action_traces elog( " JSON: ${j}", ("j", json) ); } } + if( t->receipt.valid() ) { + action_traces_doc.append( kvp( "trx_status", std::string( t->receipt->status ) ) ); + } action_traces_doc.append( kvp( "createdAt", b_date{now} ) ); mongocxx::model::insert_one insert_op{action_traces_doc.view()}; @@ -742,7 +747,7 @@ mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& bulk_action_traces } for( const auto& iline_atrace : atrace.inline_traces ) { - added |= add_action_trace( bulk_action_traces, iline_atrace, executed, now ); + added |= add_action_trace( bulk_action_traces, iline_atrace, t, executed, now ); } return added; @@ -768,7 +773,7 @@ void mongo_db_plugin_impl::_process_applied_transaction( const chain::transactio for( const auto& atrace : t->action_traces ) { try { - write_atraces |= add_action_trace( bulk_action_traces, atrace, executed, now ); + write_atraces |= add_action_trace( bulk_action_traces, atrace, t, executed, now ); } catch(...) { handle_mongo_exception("add action traces", __LINE__); } From ed0544bf40cca691c5ef3600743773e7f4f435fb Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 21 Aug 2018 20:18:10 -0500 Subject: [PATCH 056/194] Add ram_delta to action_trace --- libraries/chain/apply_context.cpp | 2 ++ libraries/chain/include/eosio/chain/trace.hpp | 4 ++-- libraries/chain/include/eosio/chain/transaction_context.hpp | 1 + libraries/chain/transaction_context.cpp | 1 + 4 files changed, 6 insertions(+), 2 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 04e29407375..d8385cfa68b 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -32,6 +32,7 @@ static inline void print_debug(account_name receiver, const action_trace& ar) { action_trace apply_context::exec_one() { auto start = fc::time_point::now(); + int64_t ram_delta = trx_context.trx_ram_delta; const auto& cfg = control.get_global_properties().configuration; try { @@ -78,6 +79,7 @@ 
action_trace apply_context::exec_one() t.trx_id = trx_context.id; t.block_num = control.pending_block_state()->block_num; t.block_time = control.pending_block_time(); + t.ram_delta = trx_context.trx_ram_delta - ram_delta; t.act = act; t.console = _pending_console_output.str(); diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 8061ae0fc66..2da08e4721e 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -24,7 +24,7 @@ namespace eosio { namespace chain { transaction_id_type trx_id; ///< the transaction that generated this action uint32_t block_num = 0; block_timestamp_type block_time; - + int64_t ram_delta = 0; }; struct action_trace : public base_action_trace { @@ -52,7 +52,7 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain FC_REFLECT( eosio::chain::base_action_trace, - (receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id)(block_num)(block_time) ) + (receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id)(block_num)(block_time)(ram_delta) ) FC_REFLECT_DERIVED( eosio::chain::action_trace, (eosio::chain::base_action_trace), (inline_traces) ) diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index 22e8eae36d6..b5811c1a66d 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -87,6 +87,7 @@ namespace eosio { namespace chain { fc::microseconds leeway = fc::microseconds(3000); int64_t billed_cpu_time_us = 0; bool explicit_billed_cpu_time = false; + int64_t trx_ram_delta = 0; private: bool is_initialized = false; diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 64683ebc049..d753bb7ad8a 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp 
@@ -383,6 +383,7 @@ namespace eosio { namespace chain { if( ram_delta > 0 ) { validate_ram_usage.insert( account ); } + trx_ram_delta += ram_delta; } uint32_t transaction_context::update_billed_cpu_time( fc::time_point now ) { From 979e00fe11b97a7f95b333a595cb1feb760355af Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 22 Aug 2018 08:30:40 -0500 Subject: [PATCH 057/194] Add ram_delta per account to action_trace --- libraries/chain/apply_context.cpp | 4 ++-- libraries/chain/include/eosio/chain/trace.hpp | 4 ++-- libraries/chain/include/eosio/chain/transaction_context.hpp | 2 +- libraries/chain/transaction_context.cpp | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index d8385cfa68b..9ea13ce8b51 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -32,7 +32,6 @@ static inline void print_debug(account_name receiver, const action_trace& ar) { action_trace apply_context::exec_one() { auto start = fc::time_point::now(); - int64_t ram_delta = trx_context.trx_ram_delta; const auto& cfg = control.get_global_properties().configuration; try { @@ -79,7 +78,8 @@ action_trace apply_context::exec_one() t.trx_id = trx_context.id; t.block_num = control.pending_block_state()->block_num; t.block_time = control.pending_block_time(); - t.ram_delta = trx_context.trx_ram_delta - ram_delta; + t.account_ram_delta = std::move( trx_context.account_ram_delta ); + trx_context.account_ram_delta.clear(); t.act = act; t.console = _pending_console_output.str(); diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 2da08e4721e..1b390ac8911 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -24,7 +24,7 @@ namespace eosio { namespace chain { transaction_id_type trx_id; ///< the transaction that generated this action uint32_t block_num = 0; 
block_timestamp_type block_time; - int64_t ram_delta = 0; + flat_map account_ram_delta; }; struct action_trace : public base_action_trace { @@ -52,7 +52,7 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain FC_REFLECT( eosio::chain::base_action_trace, - (receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id)(block_num)(block_time)(ram_delta) ) + (receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id)(block_num)(block_time)(account_ram_delta) ) FC_REFLECT_DERIVED( eosio::chain::action_trace, (eosio::chain::base_action_trace), (inline_traces) ) diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index b5811c1a66d..38857e98aeb 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -74,6 +74,7 @@ namespace eosio { namespace chain { vector executed; flat_set bill_to_accounts; flat_set validate_ram_usage; + flat_map account_ram_delta; // reset for each action /// the maximum number of virtual CPU instructions of the transaction that can be safely billed to the billable accounts uint64_t initial_max_billable_cpu = 0; @@ -87,7 +88,6 @@ namespace eosio { namespace chain { fc::microseconds leeway = fc::microseconds(3000); int64_t billed_cpu_time_us = 0; bool explicit_billed_cpu_time = false; - int64_t trx_ram_delta = 0; private: bool is_initialized = false; diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index d753bb7ad8a..44855b37530 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -383,7 +383,7 @@ namespace eosio { namespace chain { if( ram_delta > 0 ) { validate_ram_usage.insert( account ); } - trx_ram_delta += ram_delta; + account_ram_delta[account] += ram_delta; } uint32_t transaction_context::update_billed_cpu_time( fc::time_point now ) { From 
2afaa5619521e0d06c4beb679225d738b0194971 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 22 Aug 2018 12:49:13 -0500 Subject: [PATCH 058/194] Add producer_block_id to action_trace --- libraries/chain/apply_context.cpp | 1 + libraries/chain/controller.cpp | 19 ++++++++++++++----- .../chain/include/eosio/chain/controller.hpp | 1 + libraries/chain/include/eosio/chain/trace.hpp | 4 +++- 4 files changed, 19 insertions(+), 6 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 9ea13ce8b51..551b6626cb8 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -78,6 +78,7 @@ action_trace apply_context::exec_one() t.trx_id = trx_context.id; t.block_num = control.pending_block_state()->block_num; t.block_time = control.pending_block_time(); + t.producer_block_id = control.pending_producer_block_id(); t.account_ram_delta = std::move( trx_context.account_ram_delta ); trx_context.account_ram_delta.clear(); t.act = act; diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 26d54d7da70..027c78089ab 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -83,6 +83,8 @@ struct pending_state { controller::block_status _block_status = controller::block_status::incomplete; + block_id_type _producer_block_id; + void push() { _db_session.push(); } @@ -868,7 +870,7 @@ struct controller_impl { } /// push_transaction - void start_block( block_timestamp_type when, uint16_t confirm_block_count, controller::block_status s ) { + void start_block( block_timestamp_type when, uint16_t confirm_block_count, controller::block_status s, const block_id_type& producer_block_id ) { EOS_ASSERT( !pending, block_validate_exception, "pending block already exists" ); auto guard_pending = fc::make_scoped_exit([this](){ @@ -885,6 +887,7 @@ struct controller_impl { } pending->_block_status = s; + pending->_producer_block_id = producer_block_id; 
pending->_pending_block_state = std::make_shared( *head, when ); // promotes pending schedule (if any) to active pending->_pending_block_state->in_current_chain = true; @@ -953,7 +956,8 @@ struct controller_impl { void apply_block( const signed_block_ptr& b, controller::block_status s ) { try { try { EOS_ASSERT( b->block_extensions.size() == 0, block_validate_exception, "no supported extensions" ); - start_block( b->timestamp, b->confirmed, s ); + auto producer_block_id = b->id(); + start_block( b->timestamp, b->confirmed, s , producer_block_id); transaction_trace_ptr trace; @@ -993,9 +997,9 @@ struct controller_impl { finalize_block(); // this implicitly asserts that all header fields (less the signature) are identical - EOS_ASSERT(b->id() == pending->_pending_block_state->header.id(), + EOS_ASSERT(producer_block_id == pending->_pending_block_state->header.id(), block_validate_exception, "Block ID does not match", - ("producer_block_id",b->id())("validator_block_id",pending->_pending_block_state->header.id())); + ("producer_block_id",producer_block_id)("validator_block_id",pending->_pending_block_state->header.id())); // We need to fill out the pending block state's block because that gets serialized in the reversible block log // in the future we can optimize this by serializing the original and not the copy @@ -1389,7 +1393,7 @@ fork_database& controller::fork_db()const { return my->fork_db; } void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count) { validate_db_available_size(); - my->start_block(when, confirm_block_count, block_status::incomplete ); + my->start_block(when, confirm_block_count, block_status::incomplete, block_id_type() ); } void controller::finalize_block() { @@ -1521,6 +1525,11 @@ time_point controller::pending_block_time()const { return my->pending->_pending_block_state->header.timestamp; } +block_id_type controller::pending_producer_block_id()const { + EOS_ASSERT( my->pending, block_validate_exception, "no 
pending block" ); + return my->pending->_producer_block_id; +} + uint32_t controller::last_irreversible_block_num() const { return std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum); } diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 21b9e5a6f9f..85adeffd798 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -184,6 +184,7 @@ namespace eosio { namespace chain { time_point pending_block_time()const; block_state_ptr pending_block_state()const; + block_id_type pending_producer_block_id()const; const producer_schedule_type& active_producers()const; const producer_schedule_type& pending_producers()const; diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 1b390ac8911..6b5dd202574 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -24,6 +24,7 @@ namespace eosio { namespace chain { transaction_id_type trx_id; ///< the transaction that generated this action uint32_t block_num = 0; block_timestamp_type block_time; + block_id_type producer_block_id; flat_map account_ram_delta; }; @@ -52,7 +53,8 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain FC_REFLECT( eosio::chain::base_action_trace, - (receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id)(block_num)(block_time)(account_ram_delta) ) + (receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id) + (block_num)(block_time)(producer_block_id)(account_ram_delta) ) FC_REFLECT_DERIVED( eosio::chain::action_trace, (eosio::chain::base_action_trace), (inline_traces) ) From 81c59b38619d98682eabc21b3b613ad862a4327d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 22 Aug 2018 13:46:46 -0500 Subject: [PATCH 059/194] Add block_num, block_time, producer_block_id to transaction_trace --- 
libraries/chain/controller.cpp | 3 +++ libraries/chain/include/eosio/chain/trace.hpp | 6 +++++- libraries/chain/transaction_context.cpp | 3 +++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 027c78089ab..fd94e09c244 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -631,6 +631,9 @@ struct controller_impl { if( gtrx.expiration < self.pending_block_time() ) { trace = std::make_shared(); trace->id = gtrx.trx_id; + trace->block_num = self.pending_block_state()->block_num; + trace->block_time = self.pending_block_time(); + trace->producer_block_id = self.pending_producer_block_id(); trace->scheduled = true; trace->receipt = push_receipt( gtrx.trx_id, transaction_receipt::expired, billed_cpu_time_us, 0 ); // expire the transaction emit( self.accepted_transaction, trx ); diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 6b5dd202574..912d4d730ba 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -39,6 +39,9 @@ namespace eosio { namespace chain { struct transaction_trace { transaction_id_type id; + uint32_t block_num = 0; + block_timestamp_type block_time; + block_id_type producer_block_id; fc::optional receipt; fc::microseconds elapsed; uint64_t net_usage = 0; @@ -59,5 +62,6 @@ FC_REFLECT( eosio::chain::base_action_trace, FC_REFLECT_DERIVED( eosio::chain::action_trace, (eosio::chain::base_action_trace), (inline_traces) ) -FC_REFLECT( eosio::chain::transaction_trace, (id)(receipt)(elapsed)(net_usage)(scheduled) +FC_REFLECT( eosio::chain::transaction_trace, (id)(block_num)(block_time)(producer_block_id) + (receipt)(elapsed)(net_usage)(scheduled) (action_traces)(failed_dtrx_trace)(except) ) diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 44855b37530..a657109a078 100644 --- 
a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -26,6 +26,9 @@ namespace eosio { namespace chain { undo_session = c.db().start_undo_session(true); } trace->id = id; + trace->block_num = c.pending_block_state()->block_num; + trace->block_time = c.pending_block_time(); + trace->producer_block_id = c.pending_producer_block_id(); executed.reserve( trx.total_actions() ); EOS_ASSERT( trx.transaction_extensions.size() == 0, unsupported_feature, "we don't support any extensions yet" ); } From 7c9703b8fdc4dd8efae8ef210680d5cf86f72194 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 21 Aug 2018 20:18:10 -0500 Subject: [PATCH 060/194] Add ram_delta to action_trace --- libraries/chain/apply_context.cpp | 1 + libraries/chain/include/eosio/chain/transaction_context.hpp | 1 + 2 files changed, 2 insertions(+) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 551b6626cb8..f3b5f1913a2 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -32,6 +32,7 @@ static inline void print_debug(account_name receiver, const action_trace& ar) { action_trace apply_context::exec_one() { auto start = fc::time_point::now(); + int64_t ram_delta = trx_context.trx_ram_delta; const auto& cfg = control.get_global_properties().configuration; try { diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index 38857e98aeb..c9917954f13 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -88,6 +88,7 @@ namespace eosio { namespace chain { fc::microseconds leeway = fc::microseconds(3000); int64_t billed_cpu_time_us = 0; bool explicit_billed_cpu_time = false; + int64_t trx_ram_delta = 0; private: bool is_initialized = false; From c79665eda9d23afec9a5c8194f5d1076462176c1 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 
22 Aug 2018 08:30:40 -0500 Subject: [PATCH 061/194] Add ram_delta per account to action_trace --- libraries/chain/apply_context.cpp | 1 - libraries/chain/include/eosio/chain/transaction_context.hpp | 1 - 2 files changed, 2 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index f3b5f1913a2..551b6626cb8 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -32,7 +32,6 @@ static inline void print_debug(account_name receiver, const action_trace& ar) { action_trace apply_context::exec_one() { auto start = fc::time_point::now(); - int64_t ram_delta = trx_context.trx_ram_delta; const auto& cfg = control.get_global_properties().configuration; try { diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index c9917954f13..38857e98aeb 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -88,7 +88,6 @@ namespace eosio { namespace chain { fc::microseconds leeway = fc::microseconds(3000); int64_t billed_cpu_time_us = 0; bool explicit_billed_cpu_time = false; - int64_t trx_ram_delta = 0; private: bool is_initialized = false; From 878821fa477fe44b0cb7c53dbccb32012747ab0c Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 21 Aug 2018 20:18:10 -0500 Subject: [PATCH 062/194] Add ram_delta to action_trace --- libraries/chain/apply_context.cpp | 1 + libraries/chain/include/eosio/chain/transaction_context.hpp | 1 + 2 files changed, 2 insertions(+) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 551b6626cb8..f3b5f1913a2 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -32,6 +32,7 @@ static inline void print_debug(account_name receiver, const action_trace& ar) { action_trace apply_context::exec_one() { auto start = fc::time_point::now(); + int64_t 
ram_delta = trx_context.trx_ram_delta; const auto& cfg = control.get_global_properties().configuration; try { diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index 38857e98aeb..c9917954f13 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -88,6 +88,7 @@ namespace eosio { namespace chain { fc::microseconds leeway = fc::microseconds(3000); int64_t billed_cpu_time_us = 0; bool explicit_billed_cpu_time = false; + int64_t trx_ram_delta = 0; private: bool is_initialized = false; From 7765857d760a66ee0f21d95d7032a1a6c3f742b5 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 22 Aug 2018 08:30:40 -0500 Subject: [PATCH 063/194] Add ram_delta per account to action_trace --- libraries/chain/apply_context.cpp | 1 - libraries/chain/include/eosio/chain/transaction_context.hpp | 1 - 2 files changed, 2 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index f3b5f1913a2..551b6626cb8 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -32,7 +32,6 @@ static inline void print_debug(account_name receiver, const action_trace& ar) { action_trace apply_context::exec_one() { auto start = fc::time_point::now(); - int64_t ram_delta = trx_context.trx_ram_delta; const auto& cfg = control.get_global_properties().configuration; try { diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index c9917954f13..38857e98aeb 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -88,7 +88,6 @@ namespace eosio { namespace chain { fc::microseconds leeway = fc::microseconds(3000); int64_t billed_cpu_time_us = 0; bool explicit_billed_cpu_time = false; - int64_t trx_ram_delta = 0; 
private: bool is_initialized = false; From 9237692b3dc6534da6a4d2fd7ad1b5daea797f71 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 30 Aug 2018 11:52:03 -0500 Subject: [PATCH 064/194] Make producer_block_id optional. Don't set for incomplete block (speculative). --- libraries/chain/controller.cpp | 12 ++++++++---- libraries/chain/include/eosio/chain/controller.hpp | 6 +++--- libraries/chain/include/eosio/chain/trace.hpp | 4 ++-- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index fd94e09c244..0b6dbf59e20 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -83,7 +83,7 @@ struct pending_state { controller::block_status _block_status = controller::block_status::incomplete; - block_id_type _producer_block_id; + optional _producer_block_id; void push() { _db_session.push(); @@ -873,7 +873,9 @@ struct controller_impl { } /// push_transaction - void start_block( block_timestamp_type when, uint16_t confirm_block_count, controller::block_status s, const block_id_type& producer_block_id ) { + void start_block( block_timestamp_type when, uint16_t confirm_block_count, controller::block_status s, + const optional& producer_block_id ) + { EOS_ASSERT( !pending, block_validate_exception, "pending block already exists" ); auto guard_pending = fc::make_scoped_exit([this](){ @@ -1396,7 +1398,7 @@ fork_database& controller::fork_db()const { return my->fork_db; } void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count) { validate_db_available_size(); - my->start_block(when, confirm_block_count, block_status::incomplete, block_id_type() ); + my->start_block(when, confirm_block_count, block_status::incomplete, optional() ); } void controller::finalize_block() { @@ -1528,8 +1530,10 @@ time_point controller::pending_block_time()const { return my->pending->_pending_block_state->header.timestamp; } -block_id_type 
controller::pending_producer_block_id()const { +optional controller::pending_producer_block_id()const { EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); + if( my->pending->_block_status == block_status::incomplete ) + return optional(); return my->pending->_producer_block_id; } diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 85adeffd798..8747d722b20 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -182,9 +182,9 @@ namespace eosio { namespace chain { time_point fork_db_head_block_time()const; account_name fork_db_head_block_producer()const; - time_point pending_block_time()const; - block_state_ptr pending_block_state()const; - block_id_type pending_producer_block_id()const; + time_point pending_block_time()const; + block_state_ptr pending_block_state()const; + optional pending_producer_block_id()const; const producer_schedule_type& active_producers()const; const producer_schedule_type& pending_producers()const; diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 912d4d730ba..aadcd47947f 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -24,7 +24,7 @@ namespace eosio { namespace chain { transaction_id_type trx_id; ///< the transaction that generated this action uint32_t block_num = 0; block_timestamp_type block_time; - block_id_type producer_block_id; + fc::optional producer_block_id; flat_map account_ram_delta; }; @@ -41,7 +41,7 @@ namespace eosio { namespace chain { transaction_id_type id; uint32_t block_num = 0; block_timestamp_type block_time; - block_id_type producer_block_id; + fc::optional producer_block_id; fc::optional receipt; fc::microseconds elapsed; uint64_t net_usage = 0; From 576274e2829ea8fe55f90014bdce6c01b048ddc3 Mon Sep 17 00:00:00 2001 From: Kevin 
Heifner Date: Thu, 30 Aug 2018 12:22:46 -0500 Subject: [PATCH 065/194] Don't log traces with no producer_block_id --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 320ecfc3471..8385e35c6e4 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -267,6 +267,25 @@ void mongo_db_plugin_impl::accepted_transaction( const chain::transaction_metada void mongo_db_plugin_impl::applied_transaction( const chain::transaction_trace_ptr& t ) { try { + // Traces emitted from an incomplete block leave the producer_block_id as empty. + // + // Avoid adding the action traces or transaction traces to the database if the producer_block_id is empty. + // This way traces from speculatively executed transactions are not included in the Mongo database which can + // avoid potential confusion for consumers of that database. + // + // Due to forks, it could be possible for multiple incompatible action traces with the same block_num and trx_id + // to exist in the database. And if the producer double produces a block, even the block_time may not + // disambiguate the two action traces. Without a producer_block_id to disambiguate and determine if the action + // trace comes from an orphaned fork branching off of the blockchain, consumers of the Mongo DB database may be + // reacting to a stale action trace that never actually executed in the current blockchain. + // + // It is better to avoid this potential confusion by not logging traces from speculative execution, i.e. emitted + // from an incomplete block. This means that traces will not be recorded in speculative read-mode, but + // users should not be using the mongo_db_plugin in that mode anyway. + // + // It is recommended to run mongo_db_plugin in read-mode = read-only. 
+ // + if( !t->producer_block_id.valid() ) return; // always queue since account information always gathered queue( transaction_trace_queue, t ); } catch (fc::exception& e) { From 5b29e6df79fe7e8b86272f4fa0989f35e3310a26 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 30 Aug 2018 20:27:30 -0500 Subject: [PATCH 066/194] Retry get account in case not pushed to mongo yet --- tests/Node.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/Node.py b/tests/Node.py index 39de900aa71..742fe352c39 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -459,7 +459,12 @@ def getEosAccountFromDb(self, name, exitOnError=False): subcommand='db.accounts.findOne({"name" : "%s"})' % (name) if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd)) try: - trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand, exitOnError=exitOnError) + timeout = 3 + for i in range(0,(int(60/timeout) - 1)): + trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand, exitOnError=exitOnError) + if trans is not None: + return trans + time.sleep(timeout) return trans except subprocess.CalledProcessError as ex: msg=ex.output.decode("utf-8") From 0565f3d97226e6f84c8e449093ca144d684f26d6 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 30 Aug 2018 20:29:13 -0500 Subject: [PATCH 067/194] In work - producer_block_id null when producing. --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 8385e35c6e4..7270a825e84 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -285,7 +285,12 @@ void mongo_db_plugin_impl::applied_transaction( const chain::transaction_trace_p // // It is recommended to run mongo_db_plugin in read-mode = read-only. 
// - if( !t->producer_block_id.valid() ) return; +// if( !t->producer_block_id.valid() ) { +// auto v = to_variant_with_abi( *t ); +// ilog("==>${t}", ("t", fc::json::to_string( v ))); +// +// return; +// } // always queue since account information always gathered queue( transaction_trace_queue, t ); } catch (fc::exception& e) { From cd24cc2fb570fc6a229b31d8db1df37949fa97e9 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 4 Sep 2018 15:57:22 -0500 Subject: [PATCH 068/194] Use mongo connection pool. Do not log traces unless explicitly configured as a producer. --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 69 +++++++++++++-------- 1 file changed, 42 insertions(+), 27 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 7270a825e84..e1dd30893a3 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -29,6 +29,7 @@ #include #include +#include #include #include @@ -117,6 +118,7 @@ class mongo_db_plugin_impl { uint32_t start_block_num = 0; std::atomic_bool start_block_reached{false}; + bool is_producer = false; bool filter_on_star = true; std::set filter_on; std::set filter_out; @@ -128,8 +130,18 @@ class mongo_db_plugin_impl { std::string db_name; mongocxx::instance mongo_inst; - mongocxx::client mongo_conn; + fc::optional mongo_pool; + + // consum thread + fc::optional mongo_client; mongocxx::collection accounts; + mongocxx::collection trans; + mongocxx::collection trans_traces; + mongocxx::collection action_traces; + mongocxx::collection block_states; + mongocxx::collection blocks; + mongocxx::collection pub_keys; + mongocxx::collection account_controls; size_t max_queue_size = 0; int queue_sleep_time = 0; @@ -283,14 +295,12 @@ void mongo_db_plugin_impl::applied_transaction( const chain::transaction_trace_p // from an incomplete block. 
This means that traces will not be recorded in speculative read-mode, but // users should not be using the mongo_db_plugin in that mode anyway. // + // Allow logging traces if node is a producer for testing purposes, so a single nodeos can do both for testing. + // // It is recommended to run mongo_db_plugin in read-mode = read-only. // -// if( !t->producer_block_id.valid() ) { -// auto v = to_variant_with_abi( *t ); -// ilog("==>${t}", ("t", fc::json::to_string( v ))); -// -// return; -// } + if( !is_producer && !t->producer_block_id.valid() ) + return; // always queue since account information always gathered queue( transaction_trace_queue, t ); } catch (fc::exception& e) { @@ -337,6 +347,18 @@ void mongo_db_plugin_impl::accepted_block( const chain::block_state_ptr& bs ) { void mongo_db_plugin_impl::consume_blocks() { try { + mongo_client = mongo_pool->acquire(); + auto& mongo_conn = **mongo_client; + + accounts = mongo_conn[db_name][accounts_col]; + trans = mongo_conn[db_name][trans_col]; + trans_traces = mongo_conn[db_name][trans_traces_col]; + action_traces = mongo_conn[db_name][action_traces_col]; + blocks = mongo_conn[db_name][blocks_col]; + block_states = mongo_conn[db_name][block_states_col]; + pub_keys = mongo_conn[db_name][pub_keys_col]; + account_controls = mongo_conn[db_name][account_controls_col]; + while (true) { boost::mutex::scoped_lock lock(mtx); while ( transaction_metadata_queue.empty() && @@ -657,7 +679,6 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti using bsoncxx::builder::basic::make_array; namespace bbb = bsoncxx::builder::basic; - auto trans = mongo_conn[db_name][trans_col]; auto trans_doc = bsoncxx::builder::basic::document{}; auto now = std::chrono::duration_cast( @@ -782,8 +803,6 @@ void mongo_db_plugin_impl::_process_applied_transaction( const chain::transactio using namespace bsoncxx::types; using bsoncxx::builder::basic::kvp; - auto trans_traces = mongo_conn[db_name][trans_traces_col]; - auto 
action_traces = mongo_conn[db_name][action_traces_col]; auto trans_traces_doc = bsoncxx::builder::basic::document{}; auto now = std::chrono::duration_cast( @@ -864,7 +883,6 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()}); if( store_block_states ) { - auto block_states = mongo_conn[db_name][block_states_col]; auto block_state_doc = bsoncxx::builder::basic::document{}; block_state_doc.append( kvp( "block_num", b_int32{static_cast(block_num)} ), kvp( "block_id", block_id_str ), @@ -901,7 +919,6 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr } if( store_blocks ) { - auto blocks = mongo_conn[db_name][blocks_col]; auto block_doc = bsoncxx::builder::basic::document{}; block_doc.append( kvp( "block_num", b_int32{static_cast(block_num)} ), kvp( "block_id", block_id_str ) ); @@ -950,7 +967,6 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()}); if( store_blocks ) { - auto blocks = mongo_conn[db_name][blocks_col]; auto ir_block = find_block( blocks, block_id_str ); if( !ir_block ) { _process_accepted_block( bs ); @@ -967,7 +983,6 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ } if( store_block_states ) { - auto block_states = mongo_conn[db_name][block_states_col]; auto ir_block = find_block( block_states, block_id_str ); if( !ir_block ) { _process_accepted_block( bs ); @@ -985,7 +1000,6 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ if( store_transactions ) { const auto block_num = bs->block->block_num(); - auto trans = mongo_conn[db_name][trans_col]; bool transactions_in_block = false; mongocxx::options::bulk_write bulk_opts; bulk_opts.ordered( false ); @@ -1036,8 +1050,6 @@ void mongo_db_plugin_impl::add_pub_keys( const vector& keys, if( 
keys.empty()) return; - auto pub_keys = mongo_conn[db_name][pub_keys_col]; - mongocxx::bulk_write bulk = pub_keys.create_bulk_write(); for( const auto& pub_key_weight : keys ) { @@ -1071,8 +1083,6 @@ void mongo_db_plugin_impl::remove_pub_keys( const account_name& name, const perm using bsoncxx::builder::basic::kvp; using bsoncxx::builder::basic::make_document; - auto pub_keys = mongo_conn[db_name][pub_keys_col]; - try { auto result = pub_keys.delete_many( make_document( kvp( "account", name.to_string()), kvp( "permission", permission.to_string()))); @@ -1096,8 +1106,6 @@ void mongo_db_plugin_impl::add_account_control( const vectoracquire(); + auto& mongo_conn = *client; + auto block_states = mongo_conn[db_name][block_states_col]; auto blocks = mongo_conn[db_name][blocks_col]; auto trans = mongo_conn[db_name][trans_col]; @@ -1297,7 +1305,10 @@ void mongo_db_plugin_impl::init() { // Create the native contract accounts manually; sadly, we can't run their contracts to make them create themselves // See native_contract_chain_initializer::prepare_database() - accounts = mongo_conn[db_name][accounts_col]; + auto client = mongo_pool->acquire(); + auto& mongo_conn = *client; + + auto accounts = mongo_conn[db_name][accounts_col]; if (accounts.count(make_document()) == 0) { auto now = std::chrono::duration_cast( std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()}); @@ -1484,6 +1495,10 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options) my->filter_out.insert( fe ); } } + if( options.count( "producer-name") ) { + wlog( "mongodb plugin not recommended on producer node" ); + my->is_producer = true; + } if( my->start_block_num == 0 ) { my->start_block_reached = true; @@ -1495,7 +1510,7 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options) my->db_name = uri.database(); if( my->db_name.empty()) my->db_name = "EOS"; - my->mongo_conn = mongocxx::client{uri}; + my->mongo_pool.emplace(uri); // hook up to signals on 
controller chain_plugin* chain_plug = app().find_plugin(); From 3e8fba3138d7130fb5e8d064ad059b3482dfa802 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 4 Sep 2018 16:39:19 -0500 Subject: [PATCH 069/194] Encapsulate account_ram_delta in apply_context --- libraries/chain/apply_context.cpp | 9 +++++++-- libraries/chain/eosio_contract.cpp | 16 ++++++++-------- .../chain/include/eosio/chain/apply_context.hpp | 3 +++ .../include/eosio/chain/transaction_context.hpp | 5 ++--- libraries/chain/transaction_context.cpp | 1 - 5 files changed, 20 insertions(+), 14 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 551b6626cb8..82206c05c13 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -79,8 +79,8 @@ action_trace apply_context::exec_one() t.block_num = control.pending_block_state()->block_num; t.block_time = control.pending_block_time(); t.producer_block_id = control.pending_producer_block_id(); - t.account_ram_delta = std::move( trx_context.account_ram_delta ); - trx_context.account_ram_delta.clear(); + t.account_ram_delta = std::move( _account_ram_delta ); + _account_ram_delta.clear(); t.act = act; t.console = _pending_console_output.str(); @@ -639,5 +639,10 @@ uint64_t apply_context::next_auth_sequence( account_name actor ) { return rs.auth_sequence; } +void apply_context::add_ram_usage( account_name account, int64_t ram_delta ) { + trx_context.add_ram_usage( account, ram_delta ); + _account_ram_delta[account] += ram_delta; +} + } } /// eosio::chain diff --git a/libraries/chain/eosio_contract.cpp b/libraries/chain/eosio_contract.cpp index 71d846b38be..33a123981a1 100644 --- a/libraries/chain/eosio_contract.cpp +++ b/libraries/chain/eosio_contract.cpp @@ -121,7 +121,7 @@ void apply_eosio_newaccount(apply_context& context) { ram_delta += owner_permission.auth.get_billable_size(); ram_delta += active_permission.auth.get_billable_size(); - 
context.trx_context.add_ram_usage(create.name, ram_delta); + context.add_ram_usage(create.name, ram_delta); } FC_CAPTURE_AND_RETHROW( (create) ) } @@ -167,7 +167,7 @@ void apply_eosio_setcode(apply_context& context) { }); if (new_size != old_size) { - context.trx_context.add_ram_usage( act.account, new_size - old_size ); + context.add_ram_usage( act.account, new_size - old_size ); } } @@ -196,7 +196,7 @@ void apply_eosio_setabi(apply_context& context) { }); if (new_size != old_size) { - context.trx_context.add_ram_usage( act.account, new_size - old_size ); + context.add_ram_usage( act.account, new_size - old_size ); } } @@ -254,13 +254,13 @@ void apply_eosio_updateauth(apply_context& context) { int64_t new_size = (int64_t)(config::billable_size_v + permission->auth.get_billable_size()); - context.trx_context.add_ram_usage( permission->owner, new_size - old_size ); + context.add_ram_usage( permission->owner, new_size - old_size ); } else { const auto& p = authorization.create_permission( update.account, update.permission, parent_id, update.auth ); int64_t new_size = (int64_t)(config::billable_size_v + p.auth.get_billable_size()); - context.trx_context.add_ram_usage( update.account, new_size ); + context.add_ram_usage( update.account, new_size ); } } @@ -291,7 +291,7 @@ void apply_eosio_deleteauth(apply_context& context) { authorization.remove_permission( permission ); - context.trx_context.add_ram_usage( remove.account, -old_size ); + context.add_ram_usage( remove.account, -old_size ); } @@ -334,7 +334,7 @@ void apply_eosio_linkauth(apply_context& context) { link.required_permission = requirement.requirement; }); - context.trx_context.add_ram_usage( + context.add_ram_usage( l.account, (int64_t)(config::billable_size_v) ); @@ -354,7 +354,7 @@ void apply_eosio_unlinkauth(apply_context& context) { auto link_key = boost::make_tuple(unlink.account, unlink.code, unlink.type); auto link = db.find(link_key); EOS_ASSERT(link != nullptr, action_validate_exception, "Attempting 
to unlink authority, but no link found"); - context.trx_context.add_ram_usage( + context.add_ram_usage( link->account, -(int64_t)(config::billable_size_v) ); diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index ef44ca7e0df..51bc4c1744b 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -572,6 +572,8 @@ class apply_context { uint64_t next_recv_sequence( account_name receiver ); uint64_t next_auth_sequence( account_name actor ); + void add_ram_usage( account_name account, int64_t ram_delta ); + private: void validate_referenced_accounts( const transaction& t )const; @@ -607,6 +609,7 @@ class apply_context { vector _inline_actions; ///< queued inline messages vector _cfa_inline_actions; ///< queued inline messages std::ostringstream _pending_console_output; + flat_map _account_ram_delta; //bytes _cached_trx; }; diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index 38857e98aeb..3175994dedd 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -38,8 +38,6 @@ namespace eosio { namespace chain { void pause_billing_timer(); void resume_billing_timer(); - void add_ram_usage( account_name account, int64_t ram_delta ); - uint32_t update_billed_cpu_time( fc::time_point now ); std::tuple max_bandwidth_billed_accounts_can_pay( bool force_elastic_limits = false )const; @@ -49,6 +47,8 @@ namespace eosio { namespace chain { friend struct controller_impl; friend class apply_context; + void add_ram_usage( account_name account, int64_t ram_delta ); + void dispatch_action( action_trace& trace, const action& a, account_name receiver, bool context_free = false, uint32_t recurse_depth = 0 ); inline void dispatch_action( action_trace& trace, const action& a, bool 
context_free = false ) { dispatch_action(trace, a, a.account, context_free); @@ -74,7 +74,6 @@ namespace eosio { namespace chain { vector executed; flat_set bill_to_accounts; flat_set validate_ram_usage; - flat_map account_ram_delta; // reset for each action /// the maximum number of virtual CPU instructions of the transaction that can be safely billed to the billable accounts uint64_t initial_max_billable_cpu = 0; diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index a657109a078..dd58f0364ec 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -386,7 +386,6 @@ namespace eosio { namespace chain { if( ram_delta > 0 ) { validate_ram_usage.insert( account ); } - account_ram_delta[account] += ram_delta; } uint32_t transaction_context::update_billed_cpu_time( fc::time_point now ) { From 6bab6a90604914afe5d4c874a7c7a16008c90cf5 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 5 Sep 2018 09:21:05 -0500 Subject: [PATCH 070/194] Call apply_context add_ram_usage to record account ram usage --- libraries/chain/apply_context.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 82206c05c13..00a6baf0df2 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -308,14 +308,14 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a EOS_ASSERT( control.is_ram_billing_in_notify_allowed() || (receiver == act.account) || (receiver == payer) || privileged, subjective_block_production_exception, "Cannot charge RAM to other accounts during notify." 
); - trx_context.add_ram_usage( payer, (config::billable_size_v + trx_size) ); + add_ram_usage( payer, (config::billable_size_v + trx_size) ); } bool apply_context::cancel_deferred_transaction( const uint128_t& sender_id, account_name sender ) { auto& generated_transaction_idx = db.get_mutable_index(); const auto* gto = db.find(boost::make_tuple(sender, sender_id)); if ( gto ) { - trx_context.add_ram_usage( gto->payer, -(config::billable_size_v + gto->packed_trx.size()) ); + add_ram_usage( gto->payer, -(config::billable_size_v + gto->packed_trx.size()) ); generated_transaction_idx.remove(*gto); } return gto; @@ -374,7 +374,7 @@ void apply_context::update_db_usage( const account_name& payer, int64_t delta ) require_authorization( payer ); } } - trx_context.add_ram_usage(payer, delta); + add_ram_usage(payer, delta); } From b24f41fdc4bc30d14b072c0a68db0d1d08bfa958 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 5 Sep 2018 09:57:05 -0500 Subject: [PATCH 071/194] Misc cleanup --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index e1dd30893a3..2527b040a70 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -133,7 +133,6 @@ class mongo_db_plugin_impl { fc::optional mongo_pool; // consum thread - fc::optional mongo_client; mongocxx::collection accounts; mongocxx::collection trans; mongocxx::collection trans_traces; @@ -347,8 +346,8 @@ void mongo_db_plugin_impl::accepted_block( const chain::block_state_ptr& bs ) { void mongo_db_plugin_impl::consume_blocks() { try { - mongo_client = mongo_pool->acquire(); - auto& mongo_conn = **mongo_client; + auto mongo_client = mongo_pool->acquire(); + auto& mongo_conn = *mongo_client; accounts = mongo_conn[db_name][accounts_col]; trans = mongo_conn[db_name][trans_col]; @@ -1255,7 +1254,6 @@ void 
mongo_db_plugin_impl::update_account(const chain::action& act) } mongo_db_plugin_impl::mongo_db_plugin_impl() -: mongo_client{} { } @@ -1284,7 +1282,7 @@ void mongo_db_plugin_impl::wipe_database() { auto trans = mongo_conn[db_name][trans_col]; auto trans_traces = mongo_conn[db_name][trans_traces_col]; auto action_traces = mongo_conn[db_name][action_traces_col]; - accounts = mongo_conn[db_name][accounts_col]; + auto accounts = mongo_conn[db_name][accounts_col]; auto pub_keys = mongo_conn[db_name][pub_keys_col]; auto account_controls = mongo_conn[db_name][account_controls_col]; @@ -1331,9 +1329,9 @@ void mongo_db_plugin_impl::init() { blocks.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); blocks.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" )); - auto block_stats = mongo_conn[db_name][block_states_col]; - block_stats.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); - block_stats.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" )); + auto block_states = mongo_conn[db_name][block_states_col]; + block_states.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); + block_states.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" )); // accounts indexes accounts.create_index( bsoncxx::from_json( R"xxx({ "name" : 1 })xxx" )); From 80e60704babe8aad016c014e973c90c401bd5ad4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 5 Sep 2018 11:50:25 -0500 Subject: [PATCH 072/194] Possible fix for centos test failure --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 194 ++++++++++---------- 1 file changed, 100 insertions(+), 94 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 2527b040a70..539538d143d 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -133,14 +133,14 @@ class mongo_db_plugin_impl { fc::optional mongo_pool; // consum thread - 
mongocxx::collection accounts; - mongocxx::collection trans; - mongocxx::collection trans_traces; - mongocxx::collection action_traces; - mongocxx::collection block_states; - mongocxx::collection blocks; - mongocxx::collection pub_keys; - mongocxx::collection account_controls; + mongocxx::collection _accounts; + mongocxx::collection _trans; + mongocxx::collection _trans_traces; + mongocxx::collection _action_traces; + mongocxx::collection _block_states; + mongocxx::collection _blocks; + mongocxx::collection _pub_keys; + mongocxx::collection _account_controls; size_t max_queue_size = 0; int queue_sleep_time = 0; @@ -349,14 +349,14 @@ void mongo_db_plugin_impl::consume_blocks() { auto mongo_client = mongo_pool->acquire(); auto& mongo_conn = *mongo_client; - accounts = mongo_conn[db_name][accounts_col]; - trans = mongo_conn[db_name][trans_col]; - trans_traces = mongo_conn[db_name][trans_traces_col]; - action_traces = mongo_conn[db_name][action_traces_col]; - blocks = mongo_conn[db_name][blocks_col]; - block_states = mongo_conn[db_name][block_states_col]; - pub_keys = mongo_conn[db_name][pub_keys_col]; - account_controls = mongo_conn[db_name][account_controls_col]; + _accounts = mongo_conn[db_name][accounts_col]; + _trans = mongo_conn[db_name][trans_col]; + _trans_traces = mongo_conn[db_name][trans_traces_col]; + _action_traces = mongo_conn[db_name][action_traces_col]; + _blocks = mongo_conn[db_name][blocks_col]; + _block_states = mongo_conn[db_name][block_states_col]; + _pub_keys = mongo_conn[db_name][pub_keys_col]; + _account_controls = mongo_conn[db_name][account_controls_col]; while (true) { boost::mutex::scoped_lock lock(mtx); @@ -455,6 +455,7 @@ void mongo_db_plugin_impl::consume_blocks() { break; } } + mongo_pool.reset(); ilog("mongo_db_plugin consume thread shutdown gracefully"); } catch (fc::exception& e) { elog("FC Exception while consuming block ${e}", ("e", e.to_string())); @@ -551,7 +552,7 @@ optional mongo_db_plugin_impl::get_abi_serializer( account_name 
return itr->serializer; } - auto account = accounts.find_one( make_document( kvp("name", n.to_string())) ); + auto account = _accounts.find_one( make_document( kvp("name", n.to_string())) ); if(account) { auto view = account->view(); abi_def abi; @@ -737,8 +738,8 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti try { mongocxx::options::update update_opts{}; update_opts.upsert( true ); - if( !trans.update_one( make_document( kvp( "trx_id", trx_id_str ) ), - make_document( kvp( "$set", trans_doc.view() ) ), update_opts ) ) { + if( !_trans.update_one( make_document( kvp( "trx_id", trx_id_str ) ), + make_document( kvp( "$set", trans_doc.view() ) ), update_opts ) ) { EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert trans ${id}", ("id", trx_id) ); } } catch( ... ) { @@ -809,7 +810,7 @@ void mongo_db_plugin_impl::_process_applied_transaction( const chain::transactio mongocxx::options::bulk_write bulk_opts; bulk_opts.ordered(false); - mongocxx::bulk_write bulk_action_traces = action_traces.create_bulk_write(bulk_opts); + mongocxx::bulk_write bulk_action_traces = _action_traces.create_bulk_write(bulk_opts); bool write_atraces = false; bool executed = t->receipt.valid() && t->receipt->status == chain::transaction_receipt_header::executed; @@ -855,7 +856,7 @@ void mongo_db_plugin_impl::_process_applied_transaction( const chain::transactio trans_traces_doc.append( kvp( "createdAt", b_date{now} )); try { - if( !trans_traces.insert_one( trans_traces_doc.view())) { + if( !_trans_traces.insert_one( trans_traces_doc.view())) { EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert trans ${id}", ("id", t->id)); } } catch(...) 
{ @@ -908,8 +909,8 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr block_state_doc.append( kvp( "createdAt", b_date{now} ) ); try { - if( !block_states.update_one( make_document( kvp( "block_id", block_id_str ) ), - make_document( kvp( "$set", block_state_doc.view() ) ), update_opts ) ) { + if( !_block_states.update_one( make_document( kvp( "block_id", block_id_str ) ), + make_document( kvp( "$set", block_state_doc.view() ) ), update_opts ) ) { EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block_state ${bid}", ("bid", block_id) ); } } catch( ... ) { @@ -941,8 +942,8 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr block_doc.append( kvp( "createdAt", b_date{now} ) ); try { - if( !blocks.update_one( make_document( kvp( "block_id", block_id_str ) ), - make_document( kvp( "$set", block_doc.view() ) ), update_opts ) ) { + if( !_blocks.update_one( make_document( kvp( "block_id", block_id_str ) ), + make_document( kvp( "$set", block_doc.view() ) ), update_opts ) ) { EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block ${bid}", ("bid", block_id) ); } } catch( ... 
) { @@ -966,10 +967,10 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()}); if( store_blocks ) { - auto ir_block = find_block( blocks, block_id_str ); + auto ir_block = find_block( _blocks, block_id_str ); if( !ir_block ) { _process_accepted_block( bs ); - ir_block = find_block( blocks, block_id_str ); + ir_block = find_block( _blocks, block_id_str ); if( !ir_block ) return; // should never happen } @@ -978,14 +979,14 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ kvp( "in_current_chain", b_bool{bs->in_current_chain} ), kvp( "updatedAt", b_date{now} ) ) ) ); - blocks.update_one( make_document( kvp( "_id", ir_block->view()["_id"].get_oid() ) ), update_doc.view() ); + _blocks.update_one( make_document( kvp( "_id", ir_block->view()["_id"].get_oid() ) ), update_doc.view() ); } if( store_block_states ) { - auto ir_block = find_block( block_states, block_id_str ); + auto ir_block = find_block( _block_states, block_id_str ); if( !ir_block ) { _process_accepted_block( bs ); - ir_block = find_block( block_states, block_id_str ); + ir_block = find_block( _block_states, block_id_str ); if( !ir_block ) return; // should never happen } @@ -994,7 +995,7 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ kvp( "in_current_chain", b_bool{bs->in_current_chain} ), kvp( "updatedAt", b_date{now} ) ) ) ); - block_states.update_one( make_document( kvp( "_id", ir_block->view()["_id"].get_oid() ) ), update_doc.view() ); + _block_states.update_one( make_document( kvp( "_id", ir_block->view()["_id"].get_oid() ) ), update_doc.view() ); } if( store_transactions ) { @@ -1002,7 +1003,7 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ bool transactions_in_block = false; mongocxx::options::bulk_write bulk_opts; bulk_opts.ordered( false ); - auto bulk = trans.create_bulk_write( bulk_opts 
); + auto bulk = _trans.create_bulk_write( bulk_opts ); for( const auto& receipt : bs->block->transactions ) { string trx_id_str; @@ -1049,7 +1050,7 @@ void mongo_db_plugin_impl::add_pub_keys( const vector& keys, if( keys.empty()) return; - mongocxx::bulk_write bulk = pub_keys.create_bulk_write(); + mongocxx::bulk_write bulk = _pub_keys.create_bulk_write(); for( const auto& pub_key_weight : keys ) { auto find_doc = bsoncxx::builder::basic::document(); @@ -1083,7 +1084,7 @@ void mongo_db_plugin_impl::remove_pub_keys( const account_name& name, const perm using bsoncxx::builder::basic::make_document; try { - auto result = pub_keys.delete_many( make_document( kvp( "account", name.to_string()), + auto result = _pub_keys.delete_many( make_document( kvp( "account", name.to_string()), kvp( "permission", permission.to_string()))); if( !result ) { EOS_ASSERT( false, chain::mongo_db_update_fail, @@ -1105,7 +1106,7 @@ void mongo_db_plugin_impl::add_account_control( const vector(); - create_account( accounts, newacc.name, now ); + create_account( _accounts, newacc.name, now ); add_pub_keys( newacc.owner.keys, newacc.name, owner, now ); add_account_control( newacc.owner.accounts, newacc.name, owner, now ); @@ -1220,10 +1221,10 @@ void mongo_db_plugin_impl::update_account(const chain::action& act) abi_cache_index.erase( setabi.account ); - auto account = find_account( accounts, setabi.account ); + auto account = find_account( _accounts, setabi.account ); if( !account ) { - create_account( accounts, setabi.account, now ); - account = find_account( accounts, setabi.account ); + create_account( _accounts, setabi.account, now ); + account = find_account( _accounts, setabi.account ); } if( account ) { abi_def abi_def = fc::raw::unpack( setabi.abi ); @@ -1235,8 +1236,8 @@ void mongo_db_plugin_impl::update_account(const chain::action& act) kvp( "updatedAt", b_date{now} )))); try { - if( !accounts.update_one( make_document( kvp( "_id", account->view()["_id"].get_oid())), - 
update_from.view())) { + if( !_accounts.update_one( make_document( kvp( "_id", account->view()["_id"].get_oid())), + update_from.view())) { EOS_ASSERT( false, chain::mongo_db_update_fail, "Failed to udpdate account ${n}", ("n", setabi.account)); } } catch( ... ) { @@ -1303,63 +1304,68 @@ void mongo_db_plugin_impl::init() { // Create the native contract accounts manually; sadly, we can't run their contracts to make them create themselves // See native_contract_chain_initializer::prepare_database() - auto client = mongo_pool->acquire(); - auto& mongo_conn = *client; + try { + auto client = mongo_pool->acquire(); + auto& mongo_conn = *client; - auto accounts = mongo_conn[db_name][accounts_col]; - if (accounts.count(make_document()) == 0) { - auto now = std::chrono::duration_cast( - std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()}); + auto accounts = mongo_conn[db_name][accounts_col]; + if( accounts.count( make_document()) == 0 ) { + auto now = std::chrono::duration_cast( + std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()} ); - auto doc = make_document( kvp( "name", name( chain::config::system_account_name ).to_string()), - kvp( "createdAt", b_date{now} )); + auto doc = make_document( kvp( "name", name( chain::config::system_account_name ).to_string()), + kvp( "createdAt", b_date{now} )); - try { - if( !accounts.insert_one( doc.view())) { - EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert account ${n}", - ("n", name( chain::config::system_account_name ).to_string())); + try { + if( !accounts.insert_one( doc.view())) { + EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert account ${n}", + ("n", name( chain::config::system_account_name ).to_string())); + } + } catch (...) { + handle_mongo_exception( "account insert", __LINE__ ); } - } catch(...) 
{ - handle_mongo_exception("account insert", __LINE__); - } - - try { - // blocks indexes - auto blocks = mongo_conn[db_name][blocks_col]; - blocks.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); - blocks.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" )); - - auto block_states = mongo_conn[db_name][block_states_col]; - block_states.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); - block_states.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" )); - - // accounts indexes - accounts.create_index( bsoncxx::from_json( R"xxx({ "name" : 1 })xxx" )); - - // transactions indexes - auto trans = mongo_conn[db_name][trans_col]; - trans.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1 })xxx" )); - - auto trans_trace = mongo_conn[db_name][trans_traces_col]; - trans_trace.create_index( bsoncxx::from_json( R"xxx({ "id" : 1 })xxx" )); - - // action traces indexes - auto action_traces = mongo_conn[db_name][action_traces_col]; - action_traces.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1 })xxx" )); - // pub_keys indexes - auto pub_keys = mongo_conn[db_name][pub_keys_col]; - pub_keys.create_index( bsoncxx::from_json( R"xxx({ "account" : 1, "permission" : 1 })xxx" )); - pub_keys.create_index( bsoncxx::from_json( R"xxx({ "public_key" : 1 })xxx" )); - - // account_controls indexes - auto account_controls = mongo_conn[db_name][account_controls_col]; - account_controls.create_index( bsoncxx::from_json( R"xxx({ "controlled_account" : 1, "controlled_permission" : 1 })xxx" )); - account_controls.create_index( bsoncxx::from_json( R"xxx({ "controlling_account" : 1 })xxx" )); - - } catch(...) 
{ - handle_mongo_exception("create indexes", __LINE__); + try { + // blocks indexes + auto blocks = mongo_conn[db_name][blocks_col]; + blocks.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); + blocks.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" )); + + auto block_states = mongo_conn[db_name][block_states_col]; + block_states.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); + block_states.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" )); + + // accounts indexes + accounts.create_index( bsoncxx::from_json( R"xxx({ "name" : 1 })xxx" )); + + // transactions indexes + auto trans = mongo_conn[db_name][trans_col]; + trans.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1 })xxx" )); + + auto trans_trace = mongo_conn[db_name][trans_traces_col]; + trans_trace.create_index( bsoncxx::from_json( R"xxx({ "id" : 1 })xxx" )); + + // action traces indexes + auto action_traces = mongo_conn[db_name][action_traces_col]; + action_traces.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1 })xxx" )); + + // pub_keys indexes + auto pub_keys = mongo_conn[db_name][pub_keys_col]; + pub_keys.create_index( bsoncxx::from_json( R"xxx({ "account" : 1, "permission" : 1 })xxx" )); + pub_keys.create_index( bsoncxx::from_json( R"xxx({ "public_key" : 1 })xxx" )); + + // account_controls indexes + auto account_controls = mongo_conn[db_name][account_controls_col]; + account_controls.create_index( + bsoncxx::from_json( R"xxx({ "controlled_account" : 1, "controlled_permission" : 1 })xxx" )); + account_controls.create_index( bsoncxx::from_json( R"xxx({ "controlling_account" : 1 })xxx" )); + + } catch (...) { + handle_mongo_exception( "create indexes", __LINE__ ); + } } + } catch (...) 
{ + handle_mongo_exception( "mongo init", __LINE__ ); } ilog("starting db plugin thread"); From 9da6f2731ef72e20a1ee0198e4599f424a01f56c Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 5 Sep 2018 14:16:54 -0500 Subject: [PATCH 073/194] Storing off last 10 cout and cerr results from calling checkOutput to report on failure. GH #5199 --- tests/TestHelper.py | 6 ++++++ tests/testUtils.py | 3 +++ 2 files changed, 9 insertions(+) diff --git a/tests/TestHelper.py b/tests/TestHelper.py index 9270c1a75a1..e521eb3e696 100644 --- a/tests/TestHelper.py +++ b/tests/TestHelper.py @@ -122,6 +122,12 @@ def shutdown(cluster, walletMgr, testSuccessful=True, killEosInstances=True, kil if walletMgr: walletMgr.dumpErrorDetails() Utils.Print("== Errors see above ==") + if len(Utils.CheckOutputDeque)>0: + Utils.Print("== cout/cerr pairs from last %d calls to Utils. ==" % len(Utils.CheckOutputDeque)) + for out, err in Utils.CheckOutputDeque: + Utils.Print("cout={%s}" % (out)) + Utils.Print("cerr={%s}\n" % (out)) + Utils.Print("== cout/cerr pairs done. 
==") if killEosInstances: Utils.Print("Shut down the cluster.") diff --git a/tests/testUtils.py b/tests/testUtils.py index ad77cd20c4c..d2e3d5be9f3 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -1,6 +1,7 @@ import subprocess import time import os +from collections import deque from collections import namedtuple import inspect import json @@ -24,6 +25,7 @@ class Utils: EosLauncherPath="programs/eosio-launcher/eosio-launcher" MongoPath="mongo" ShuttingDown=False + CheckOutputDeque=deque(maxlen=10) @staticmethod def Print(*args, **kwargs): @@ -76,6 +78,7 @@ def checkOutput(cmd): assert(isinstance(cmd, list)) popen=subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (output,error)=popen.communicate() + Utils.CheckOutputDeque.append((output,error)) if popen.returncode != 0: raise subprocess.CalledProcessError(returncode=popen.returncode, cmd=cmd, output=error) return output.decode("utf-8") From 1fbd885a93540fffc696bf29e291793468897e18 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 2 Aug 2018 13:41:49 -0500 Subject: [PATCH 074/194] Added flag to prevent cleos from launching keosd. 
GH #4973 --- programs/cleos/main.cpp | 4 ++++ tests/Node.py | 14 +++++++++----- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 972818e9f20..b829befe467 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -180,6 +180,7 @@ bool tx_skip_sign = false; bool tx_print_json = false; bool print_request = false; bool print_response = false; +bool no_auto_keosd = false; uint8_t tx_max_cpu_usage = 0; uint32_t tx_max_net_usage = 0; @@ -788,6 +789,8 @@ void try_local_port( const string& lo_address, uint16_t port, uint32_t duration } void ensure_keosd_running(CLI::App* app) { + if (!no_auto_keosd) + return; // get, version, net do not require keosd if (tx_skip_sign || app->got_subcommand("get") || app->got_subcommand("version") || app->got_subcommand("net")) return; @@ -1742,6 +1745,7 @@ int main( int argc, char** argv ) { app.add_option( "-r,--header", header_opt_callback, localized("pass specific HTTP header; repeat this option to pass multiple headers")); app.add_flag( "-n,--no-verify", no_verify, localized("don't verify peer certificate when using HTTPS")); + app.add_flag( "--no-auto-keosd", no_auto_keosd, localized("don't automatically launch a keosd if one is not currently running")); app.set_callback([&app]{ ensure_keosd_running(&app);}); bool verbose_errors = false; diff --git a/tests/Node.py b/tests/Node.py index 39de900aa71..d0568daedfb 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -38,6 +38,7 @@ def __init__(self, host, port, pid=None, cmd=None, enableMongo=False, mongoHost= self.mongoPort=mongoPort self.mongoDb=mongoDb self.endpointArgs="--url http://%s:%d" % (self.host, self.port) + self.miscEosClientArgs="--no-auto-keosd" self.mongoEndpointArgs="" self.infoValid=None self.lastRetrievedHeadBlockNum=None @@ -45,6 +46,9 @@ def __init__(self, host, port, pid=None, cmd=None, enableMongo=False, mongoHost= if self.enableMongo: self.mongoEndpointArgs += "--host %s --port %d %s" % 
(mongoHost, mongoPort, mongoDb) + def eosClientArgs(self): + return self.endpointArgs + " " + self.miscEosClientArgs + def __str__(self): #return "Host: %s, Port:%d, Pid:%s, Cmd:\"%s\"" % (self.host, self.port, self.pid, self.cmd) return "Host: %s, Port:%d" % (self.host, self.port) @@ -576,7 +580,7 @@ def transferFunds(self, source, destination, amountStr, memo="memo", force=False assert(isinstance(destination, Account)) cmd="%s %s -v transfer -j %s %s" % ( - Utils.EosClientPath, self.endpointArgs, source.name, destination.name) + Utils.EosClientPath, self.eosClientArgs(), source.name, destination.name) cmdArr=cmd.split() cmdArr.append(amountStr) cmdArr.append(memo) @@ -741,7 +745,7 @@ def getAccountEosBalance(self, scope): return balance def getAccountCodeHash(self, account): - cmd="%s %s get code %s" % (Utils.EosClientPath, self.endpointArgs, account) + cmd="%s %s get code %s" % (Utils.EosClientPath, self.eosClientArgs(), account) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) try: retStr=Utils.checkOutput(cmd.split()) @@ -761,7 +765,7 @@ def getAccountCodeHash(self, account): # publish contract and return transaction as json object def publishContract(self, account, contractDir, wasmFile, abiFile, waitForTransBlock=False, shouldFail=False): - cmd="%s %s -v set contract -j %s %s" % (Utils.EosClientPath, self.endpointArgs, account, contractDir) + cmd="%s %s -v set contract -j %s %s" % (Utils.EosClientPath, self.eosClientArgs(), account, contractDir) cmd += "" if wasmFile is None else (" "+ wasmFile) cmd += "" if abiFile is None else (" " + abiFile) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) @@ -815,7 +819,7 @@ def getTableColumns(self, contract, scope, table): # returns tuple with transaction and def pushMessage(self, account, action, data, opts, silentErrors=False): - cmd="%s %s push action -j %s %s" % (Utils.EosClientPath, self.endpointArgs, account, action) + cmd="%s %s push action -j %s %s" % (Utils.EosClientPath, self.eosClientArgs(), account, action) 
cmdArr=cmd.split() if data is not None: cmdArr.append(data) @@ -872,7 +876,7 @@ def vote(self, account, producers, waitForTransBlock=False, exitOnError=False): def processCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): assert(isinstance(returnType, ReturnType)) - cmd="%s %s %s" % (Utils.EosClientPath, self.endpointArgs, cmd) + cmd="%s %s %s" % (Utils.EosClientPath, self.eosClientArgs(), cmd) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) trans=None try: From 5e75750fb00996e73c70b8524cd4ab10044ca449 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 5 Sep 2018 15:39:10 -0500 Subject: [PATCH 075/194] Better error handling --- tests/Cluster.py | 5 ++++- tests/testUtils.py | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 5f3d26d3d51..dac53606bf8 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -1256,4 +1256,7 @@ def reportStatus(self): self.biosNode.reportStatus() if hasattr(self, "nodes"): for node in self.nodes: - node.reportStatus() + try: + node.reportStatus() + except: + Utils.Print("No reportStatus") diff --git a/tests/testUtils.py b/tests/testUtils.py index ad77cd20c4c..8cbf5946e36 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -87,7 +87,7 @@ def errorExit(msg="", raw=False, errorCode=1): return Utils.Print("ERROR:" if not raw else "", msg) traceback.print_stack(limit=-1) - exit(errorCode) + sys.exit(errorCode) @staticmethod def cmdError(name, cmdCode=0): From 65c1cfdbc14671afc1bea444ab76e72c48574021 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 5 Sep 2018 16:26:01 -0500 Subject: [PATCH 076/194] Additional info output --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 2 ++ tests/testUtils.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 539538d143d..4bc0becd777 100644 --- 
a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -1295,6 +1295,7 @@ void mongo_db_plugin_impl::wipe_database() { accounts.drop(); pub_keys.drop(); account_controls.drop(); + ilog("done wipe_database"); } void mongo_db_plugin_impl::init() { @@ -1304,6 +1305,7 @@ void mongo_db_plugin_impl::init() { // Create the native contract accounts manually; sadly, we can't run their contracts to make them create themselves // See native_contract_chain_initializer::prepare_database() + ilog("init mongo"); try { auto client = mongo_pool->acquire(); auto& mongo_conn = *client; diff --git a/tests/testUtils.py b/tests/testUtils.py index 8cbf5946e36..9629272b17c 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -6,6 +6,7 @@ import json import shlex from sys import stdout +from sys import exit import traceback ########################################################################################### @@ -87,7 +88,7 @@ def errorExit(msg="", raw=False, errorCode=1): return Utils.Print("ERROR:" if not raw else "", msg) traceback.print_stack(limit=-1) - sys.exit(errorCode) + exit(errorCode) @staticmethod def cmdError(name, cmdCode=0): From d27700603b682e64c8a3f28e8954116693e0207d Mon Sep 17 00:00:00 2001 From: Kayan Date: Thu, 6 Sep 2018 16:22:51 +0800 Subject: [PATCH 077/194] add test case for get_scope --- plugins/chain_plugin/chain_plugin.cpp | 3 +- .../eosio/chain_plugin/chain_plugin.hpp | 2 +- tests/CMakeLists.txt | 2 +- tests/get_table_tests.cpp | 124 ++++++++++++++++++ 4 files changed, 127 insertions(+), 4 deletions(-) create mode 100644 tests/get_table_tests.cpp diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 42dcfc60174..0ea57d7cd63 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1136,8 +1136,7 @@ read_only::get_table_by_scope_result read_only::get_table_by_scope( const read_o } continue; } - get_table_by_scope_result_row 
row{itr->code, itr->scope, itr->table, itr->payer, itr->count}; - result.rows.emplace_back(fc::variant(row)); + result.rows.push_back({itr->code, itr->scope, itr->table, itr->payer, itr->count}); if (++count == p.limit || fc::time_point::now() > end) { ++itr; break; diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 53152c78d1f..81c04b9b355 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -277,7 +277,7 @@ class read_only { uint32_t count; }; struct get_table_by_scope_result { - vector rows; ///< one row per item, either encoded as hex String or JSON object + vector rows; bool more = false; ///< true if last element in data is not the end and sizeof data() < limit }; diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 1469d7c7798..059d42a0856 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -13,7 +13,7 @@ set( CMAKE_CXX_STANDARD 14 ) include_directories("${CMAKE_SOURCE_DIR}/plugins/wallet_plugin/include") -file(GLOB UNIT_TESTS "wallet_tests.cpp") +file(GLOB UNIT_TESTS "*.cpp") add_executable( plugin_test ${UNIT_TESTS} ${WASM_UNIT_TESTS} main.cpp) target_link_libraries( plugin_test eosio_testing eosio_chain chainbase eos_utilities chain_plugin wallet_plugin abi_generator fc ${PLATFORM_SPECIFIC_LIBS} ) diff --git a/tests/get_table_tests.cpp b/tests/get_table_tests.cpp new file mode 100644 index 00000000000..d2e648977ab --- /dev/null +++ b/tests/get_table_tests.cpp @@ -0,0 +1,124 @@ +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include + +#include + +#include +#include + +#include +#include + +#ifdef NON_VALIDATING_TEST +#define TESTER tester +#else +#define TESTER validating_tester +#endif + +using namespace 
eosio; +using namespace eosio::chain; +using namespace eosio::testing; +using namespace fc; + +BOOST_AUTO_TEST_SUITE(get_table_tests) + +BOOST_FIXTURE_TEST_CASE( get_scope_test, TESTER ) try { + produce_blocks(2); + + create_accounts({ N(eosio.token), N(eosio.ram), N(eosio.ramfee), N(eosio.stake), + N(eosio.bpay), N(eosio.vpay), N(eosio.saving), N(eosio.names) }); + + std::vector accs{N(inita), N(initb), N(initc), N(initd)}; + create_accounts(accs); + produce_block(); + + set_code( N(eosio.token), eosio_token_wast ); + set_abi( N(eosio.token), eosio_token_abi ); + produce_blocks(1); + + // create currency + auto act = mutable_variant_object() + ("issuer", "eosio") + ("maximum_supply", eosio::chain::asset::from_string("1000000000.0000 SYS")); + push_action(N(eosio.token), N(create), N(eosio.token), act ); + + // issue + for (account_name a: accs) { + push_action( N(eosio.token), N(issue), "eosio", mutable_variant_object() + ("to", name(a) ) + ("quantity", eosio::chain::asset::from_string("999.0000 SYS") ) + ("memo", "") + ); + } + produce_blocks(1); + + // iterate over scope + eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX)); + eosio::chain_apis::read_only::get_table_by_scope_params param{N(eosio.token), N(accounts), "inita", "", 10}; + eosio::chain_apis::read_only::get_table_by_scope_result result = plugin.read_only::get_table_by_scope(param); + + BOOST_REQUIRE_EQUAL(4, result.rows.size()); + BOOST_REQUIRE_EQUAL(false, result.more); + if (result.rows.size() >= 4) { + BOOST_REQUIRE_EQUAL(name(N(eosio.token)), result.rows[0].code); + BOOST_REQUIRE_EQUAL(name(N(inita)), result.rows[0].scope); + BOOST_REQUIRE_EQUAL(name(N(accounts)), result.rows[0].table); + BOOST_REQUIRE_EQUAL(name(N(eosio)), result.rows[0].payer); + BOOST_REQUIRE_EQUAL(1, result.rows[0].count); + + BOOST_REQUIRE_EQUAL(name(N(initb)), result.rows[1].scope); + BOOST_REQUIRE_EQUAL(name(N(initc)), result.rows[2].scope); + BOOST_REQUIRE_EQUAL(name(N(initd)), 
result.rows[3].scope); + } + + param.lower_bound = "initb"; + param.upper_bound = "initd"; + result = plugin.read_only::get_table_by_scope(param); + BOOST_REQUIRE_EQUAL(2, result.rows.size()); + BOOST_REQUIRE_EQUAL(false, result.more); + if (result.rows.size() >= 2) { + BOOST_REQUIRE_EQUAL(name(N(initb)), result.rows[0].scope); + BOOST_REQUIRE_EQUAL(name(N(initc)), result.rows[1].scope); + } + + param.limit = 1; + result = plugin.read_only::get_table_by_scope(param); + BOOST_REQUIRE_EQUAL(1, result.rows.size()); + BOOST_REQUIRE_EQUAL(true, result.more); + + param.table = name(0); + result = plugin.read_only::get_table_by_scope(param); + BOOST_REQUIRE_EQUAL(1, result.rows.size()); + BOOST_REQUIRE_EQUAL(true, result.more); + + param.table = N(invalid); + result = plugin.read_only::get_table_by_scope(param); + BOOST_REQUIRE_EQUAL(0, result.rows.size()); + BOOST_REQUIRE_EQUAL(false, result.more); + +} FC_LOG_AND_RETHROW() /// get_scope_test + +BOOST_AUTO_TEST_SUITE_END() + From 94d0837019875abeb0204c521df949ac140d397c Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 6 Sep 2018 09:09:07 -0500 Subject: [PATCH 078/194] Prevent transaction time constraints from interfering with integration tests.
GH #5199 --- tests/Cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 5f3d26d3d51..b746040b15a 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -135,7 +135,7 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne if self.staging: cmdArr.append("--nogen") - nodeosArgs="--max-transaction-time 50000 --abi-serializer-max-time-ms 990000 --filter-on * --p2p-max-nodes-per-host %d" % (totalNodes) + nodeosArgs="--max-transaction-time 990000 --abi-serializer-max-time-ms 990000 --filter-on * --p2p-max-nodes-per-host %d" % (totalNodes) if not self.walletd: nodeosArgs += " --plugin eosio::wallet_api_plugin" if self.enableMongo: From 3df606059b4abec7caa00bc9e4d2bbd92349af70 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 6 Sep 2018 09:11:02 -0500 Subject: [PATCH 079/194] Added exception handling around testUtils.checkOutput. GH #5199 --- tests/Cluster.py | 6 ++++-- tests/WalletMgr.py | 31 ++++++++++++++++++++++++++----- 2 files changed, 30 insertions(+), 7 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index b746040b15a..a9987960949 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -1064,8 +1064,10 @@ def myFunc(): if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) psOut=Utils.checkOutput(cmd.split()) return psOut - except subprocess.CalledProcessError as _: - pass + except subprocess.CalledProcessError as ex: + msg=ex.output.decode("utf-8") + Utils.Print("ERROR: call of \"%s\" failed. 
%s" % (cmd, msg)) + return None return None psOut=Utils.waitForObj(myFunc, timeout) diff --git a/tests/WalletMgr.py b/tests/WalletMgr.py index 934e0258638..4edda77d7d9 100644 --- a/tests/WalletMgr.py +++ b/tests/WalletMgr.py @@ -55,7 +55,17 @@ def create(self, name, accounts=None, exitOnError=True): p = re.compile(r'\n\"(\w+)\"\n', re.MULTILINE) cmd="%s %s wallet create --name %s --to-console" % (Utils.EosClientPath, self.endpointArgs, name) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) - retStr=Utils.checkOutput(cmd.split()) + retStr=None + try: + retStr=Utils.checkOutput(cmd.split()) + except subprocess.CalledProcessError as ex: + msg=ex.output.decode("utf-8") + msg="ERROR: Failed to import account owner key %s. %s" % (account.ownerPrivateKey, msg) + if exitOnError: + Utils.errorExit("%s" % (msg)) + Utils.Print("%s" % (msg)) + return None + #Utils.Print("create: %s" % (retStr)) m=p.search(retStr) if m is None: @@ -152,8 +162,14 @@ def getOpenWallets(self): p = re.compile(r'\s+\"(\w+)\s\*\",?\n', re.MULTILINE) cmd="%s %s wallet list" % (Utils.EosClientPath, self.endpointArgs) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) - retStr=Utils.checkOutput(cmd.split()) - #Utils.Print("retStr: %s" % (retStr)) + retStr=None + try: + retStr=Utils.checkOutput(cmd.split()) + except subprocess.CalledProcessError as ex: + msg=ex.output.decode("utf-8") + Utils.Print("ERROR: Failed to open wallets. 
%s" % (msg)) + return False + m=p.findall(retStr) if m is None: Utils.Print("ERROR: wallet list parser failure") @@ -168,8 +184,13 @@ def getKeys(self, wallet): p = re.compile(r'\n\s+\"(\w+)\"\n', re.MULTILINE) cmd="%s %s wallet private_keys --name %s --password %s " % (Utils.EosClientPath, self.endpointArgs, wallet.name, wallet.password) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) - retStr=Utils.checkOutput(cmd.split()) - #Utils.Print("retStr: %s" % (retStr)) + retStr=None + try: + retStr=Utils.checkOutput(cmd.split()) + except subprocess.CalledProcessError as ex: + msg=ex.output.decode("utf-8") + Utils.Print("ERROR: Failed to get keys. %s" % (msg)) + return False m=p.findall(retStr) if m is None: Utils.Print("ERROR: wallet private_keys parser failure") From 14ec763a51ba94ca236a5189a6addd506d79ba0d Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 6 Sep 2018 09:49:45 -0500 Subject: [PATCH 080/194] Cleanup of errorExit method. GH #5199 --- tests/Cluster.py | 4 ++-- tests/Node.py | 12 ++++++------ tests/WalletMgr.py | 2 +- tests/nodeos_under_min_avail_ram.py | 5 +++-- tests/nodeos_voting_test.py | 18 +++++++++--------- 5 files changed, 21 insertions(+), 20 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index a9987960949..e71649c5f29 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -446,10 +446,10 @@ def populateWallet(self, accountsCount, wallet): def getNode(self, nodeId=0, exitOnError=True): if exitOnError and nodeId >= len(self.nodes): Utils.cmdError("cluster never created node %d" % (nodeId)) - errorExit("Failed to retrieve node %d" % (nodeId)) + Utils.errorExit("Failed to retrieve node %d" % (nodeId)) if exitOnError and self.nodes[nodeId] is None: Utils.cmdError("cluster has None value for node %d" % (nodeId)) - errorExit("Failed to retrieve node %d" % (nodeId)) + Utils.errorExit("Failed to retrieve node %d" % (nodeId)) return self.nodes[nodeId] def getNodes(self): diff --git a/tests/Node.py b/tests/Node.py index 
d0568daedfb..db90cef6d82 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -289,7 +289,7 @@ def getTransactionMdb(self, transId, silentErrors=False, exitOnError=False): errorMsg="Exception during get db node get trans in mongodb with transaction id=%s. %s" % (transId,msg) if exitOnError: Utils.cmdError("" % (errorMsg)) - errorExit("Failed to retrieve transaction in mongodb for transaction id=%s" % (transId)) + Utils.errorExit("Failed to retrieve transaction in mongodb for transaction id=%s" % (transId)) elif not silentErrors: Utils.Print("ERROR: %s" % (errorMsg)) return None @@ -469,7 +469,7 @@ def getEosAccountFromDb(self, name, exitOnError=False): msg=ex.output.decode("utf-8") if exitOnError: Utils.cmdError("Exception during get account from db for %s. %s" % (name, msg)) - errorExit("Failed during get account from db for %s. %s" % (name, msg)) + Utils.errorExit("Failed during get account from db for %s. %s" % (name, msg)) Utils.Print("ERROR: Exception during get account from db for %s. %s" % (name, msg)) return None @@ -596,12 +596,12 @@ def transferFunds(self, source, destination, amountStr, memo="memo", force=False Utils.Print("ERROR: Exception during funds transfer. 
%s" % (msg)) if exitOnError: Utils.cmdError("could not transfer \"%s\" from %s to %s" % (amountStr, source, destination)) - errorExit("Failed to transfer \"%s\" from %s to %s" % (amountStr, source, destination)) + Utils.errorExit("Failed to transfer \"%s\" from %s to %s" % (amountStr, source, destination)) return None if trans is None: Utils.cmdError("could not transfer \"%s\" from %s to %s" % (amountStr, source, destination)) - errorExit("Failed to transfer \"%s\" from %s to %s" % (amountStr, source, destination)) + Utils.errorExit("Failed to transfer \"%s\" from %s to %s" % (amountStr, source, destination)) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -901,7 +901,7 @@ def processCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, exitMsg exitMsg="" if exitOnError and trans is None: Utils.cmdError("could not %s - %s" % (cmdDesc,exitMsg)) - errorExit("Failed to %s" % (cmdDesc)) + Utils.errorExit("Failed to %s" % (cmdDesc)) return trans @@ -913,7 +913,7 @@ def waitForTransBlockIfNeeded(self, trans, waitForTransBlock, exitOnError=False) if not self.waitForTransInBlock(transId): if exitOnError: Utils.cmdError("transaction with id %s never made it to a block" % (transId)) - errorExit("Failed to find transaction with id %s in a block before timeout" % (transId)) + Utils.errorExit("Failed to find transaction with id %s in a block before timeout" % (transId)) return None return trans diff --git a/tests/WalletMgr.py b/tests/WalletMgr.py index 4edda77d7d9..870cd41a0da 100644 --- a/tests/WalletMgr.py +++ b/tests/WalletMgr.py @@ -71,7 +71,7 @@ def create(self, name, accounts=None, exitOnError=True): if m is None: if exitOnError: Utils.cmdError("could not create wallet %s" % (name)) - errorExit("Failed to create wallet %s" % (name)) + Utils.errorExit("Failed to create wallet %s" % (name)) Utils.Print("ERROR: wallet password parser failure") return None diff --git a/tests/nodeos_under_min_avail_ram.py 
b/tests/nodeos_under_min_avail_ram.py index 016dfbceb8c..d06c1fe9de9 100755 --- a/tests/nodeos_under_min_avail_ram.py +++ b/tests/nodeos_under_min_avail_ram.py @@ -13,6 +13,9 @@ import math import re +Print=Utils.Print +errorExit=Utils.errorExit + class NamedAccounts: def __init__(self, cluster, numAccounts): @@ -50,8 +53,6 @@ def setName(self, num): # --dump-error-details # --keep-logs ############################################################### -Print=Utils.Print -errorExit=Utils.errorExit args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run"}) Utils.Debug=args.v diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py index dac5f8dd4a5..b560795b6a7 100755 --- a/tests/nodeos_voting_test.py +++ b/tests/nodeos_voting_test.py @@ -36,7 +36,7 @@ def getBlockProducer(node, blockNum): blockProducer=block["producer"] if blockProducer is None: Utils.cmdError("could not get producer for block number %s" % (blockNum)) - errorExit("Failed to get block's producer") + Utils.errorExit("Failed to get block's producer") return blockProducer def getNodeNum(cluster, node): @@ -55,10 +55,10 @@ def validBlockProducer(prodsActive, prodsSeen, blockNum, node): blockProducer=getBlockProducer(node, blockNum) if blockProducer not in prodsActive: Utils.cmdError("unexpected block producer %s at blockNum=%s" % (blockProducer,blockNum)) - errorExit("Failed because of invalid block producer") + Utils.errorExit("Failed because of invalid block producer") if not prodsActive[blockProducer]: Utils.cmdError("block producer %s for blockNum=%s not elected, belongs to node %s" % (blockProducer, blockNum, ProducerToNode.map[blockProducer])) - errorExit("Failed because of incorrect block producer") + Utils.errorExit("Failed because of incorrect block producer") prodsSeen[blockProducer]=True def getNextCleanProductionCycle(trans, node): @@ -149,7 +149,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): # each new set of 12 
blocks should have a different blockProducer if lastBlockProducer is not None and lastBlockProducer==getBlockProducer(node, blockNum): Utils.cmdError("expected blockNum %s to be produced by any of the valid producers except %s" % (blockNum, lastBlockProducer)) - errorExit("Failed because of incorrect block producer order") + Utils.errorExit("Failed because of incorrect block producer order") # make sure that the next set of 12 blocks all have the same blockProducer lastBlockProducer=getBlockProducer(node, blockNum) @@ -167,14 +167,14 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): printStr+=" " newBlockNum+=1 Utils.cmdError("expected blockNum %s (started from %s) to be produced by %s, but produded by %s: round=%s, prod slot=%s, prod num=%s - %s" % (blockNum, startingFrom, lastBlockProducer, blockProducer, i, j, k, printStr)) - errorExit("Failed because of incorrect block producer order") + Utils.errorExit("Failed because of incorrect block producer order") blockNum+=1 # make sure that we have seen all 21 producers prodsSeenKeys=prodsSeen.keys() if len(prodsSeenKeys)!=21: Utils.cmdError("only saw %s producers of expected 21. 
At blockNum %s only the following producers were seen: %s" % (len(prodsSeenKeys), blockNum, ",".join(prodsSeenKeys))) - errorExit("Failed because of missing block producers") + Utils.errorExit("Failed because of missing block producers") Utils.Debug=temp @@ -211,14 +211,14 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): Print("Stand up cluster") if cluster.launch(prodCount=prodCount, onlyBios=False, dontKill=dontKill, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21, p2pPlugin=p2pPlugin, useBiosBootFile=False) is False: Utils.cmdError("launcher") - errorExit("Failed to stand up eos cluster.") + Utils.errorExit("Failed to stand up eos cluster.") Print("Validating system accounts after bootstrap") cluster.validateAccounts(None) accounts=cluster.createAccountKeys(5) if accounts is None: - errorExit("FAILURE - create keys") + Utils.errorExit("FAILURE - create keys") accounts[0].name="tester111111" accounts[1].name="tester222222" accounts[2].name="tester333333" @@ -232,7 +232,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): walletMgr.cleanup() if walletMgr.launch() is False: Utils.cmdError("%s" % (WalletdName)) - errorExit("Failed to stand up eos walletd.") + Utils.errorExit("Failed to stand up eos walletd.") testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4]]) From bcd98026fa15273c4836c477603b35bef6c93b6d Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 6 Sep 2018 18:57:55 -0400 Subject: [PATCH 081/194] Revert "Simplify Wallet Tools EOSIO Blockchain Detection for End Users" --- plugins/chain_plugin/chain_plugin.cpp | 2 -- .../chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp | 4 +--- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index e080a2314d8..233b6048a46 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ 
b/plugins/chain_plugin/chain_plugin.cpp @@ -950,8 +950,6 @@ read_only::get_info_results read_only::get_info(const read_only::get_info_params //std::bitset<64>(db.get_dynamic_global_properties().recent_slots_filled).to_string(), //__builtin_popcountll(db.get_dynamic_global_properties().recent_slots_filled) / 64.0, app().version_string(), - symbol().name(), - symbol().precision(), }; } diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index d92d84109aa..4d1abd7ede8 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -97,8 +97,6 @@ class read_only { //string recent_slots; //double participation_rate = 0; optional server_version_string; - optional core_symbol; - optional core_symbol_precision; }; get_info_results get_info(const get_info_params&) const; @@ -627,7 +625,7 @@ class chain_plugin : public plugin { FC_REFLECT( eosio::chain_apis::permission, (perm_name)(parent)(required_auth) ) FC_REFLECT(eosio::chain_apis::empty, ) FC_REFLECT(eosio::chain_apis::read_only::get_info_results, -(server_version)(chain_id)(head_block_num)(last_irreversible_block_num)(last_irreversible_block_id)(head_block_id)(head_block_time)(head_block_producer)(virtual_block_cpu_limit)(virtual_block_net_limit)(block_cpu_limit)(block_net_limit)(server_version_string)(core_symbol)(core_symbol_precision) ) +(server_version)(chain_id)(head_block_num)(last_irreversible_block_num)(last_irreversible_block_id)(head_block_id)(head_block_time)(head_block_producer)(virtual_block_cpu_limit)(virtual_block_net_limit)(block_cpu_limit)(block_net_limit)(server_version_string) ) FC_REFLECT(eosio::chain_apis::read_only::get_block_params, (block_num_or_id)) FC_REFLECT(eosio::chain_apis::read_only::get_block_header_state_params, (block_num_or_id)) From 0624889abf3b767ed05641b9ed8033a35e2197d2 Mon Sep 17 00:00:00 2001 From: 
Kayan Date: Fri, 7 Sep 2018 18:05:57 +0800 Subject: [PATCH 082/194] change "more" from bool to string --- plugins/chain_plugin/chain_plugin.cpp | 2 +- .../include/eosio/chain_plugin/chain_plugin.hpp | 2 +- tests/get_table_tests.cpp | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 0ea57d7cd63..1b435ecc708 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1143,7 +1143,7 @@ read_only::get_table_by_scope_result read_only::get_table_by_scope( const read_o } } if (itr != upper) { - result.more = true; + result.more = (string)itr->scope; } return result; } diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 81c04b9b355..fba45312398 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -278,7 +278,7 @@ class read_only { }; struct get_table_by_scope_result { vector rows; - bool more = false; ///< true if last element in data is not the end and sizeof data() < limit + string more; ///< fill lower_bound with this value to fetch more rows }; get_table_by_scope_result get_table_by_scope( const get_table_by_scope_params& params )const; diff --git a/tests/get_table_tests.cpp b/tests/get_table_tests.cpp index d2e648977ab..818ca5562d2 100644 --- a/tests/get_table_tests.cpp +++ b/tests/get_table_tests.cpp @@ -80,7 +80,7 @@ BOOST_FIXTURE_TEST_CASE( get_scope_test, TESTER ) try { eosio::chain_apis::read_only::get_table_by_scope_result result = plugin.read_only::get_table_by_scope(param); BOOST_REQUIRE_EQUAL(4, result.rows.size()); - BOOST_REQUIRE_EQUAL(false, result.more); + BOOST_REQUIRE_EQUAL("", result.more); if (result.rows.size() >= 4) { BOOST_REQUIRE_EQUAL(name(N(eosio.token)), result.rows[0].code); 
BOOST_REQUIRE_EQUAL(name(N(inita)), result.rows[0].scope); @@ -97,7 +97,7 @@ BOOST_FIXTURE_TEST_CASE( get_scope_test, TESTER ) try { param.upper_bound = "initd"; result = plugin.read_only::get_table_by_scope(param); BOOST_REQUIRE_EQUAL(2, result.rows.size()); - BOOST_REQUIRE_EQUAL(false, result.more); + BOOST_REQUIRE_EQUAL("", result.more); if (result.rows.size() >= 2) { BOOST_REQUIRE_EQUAL(name(N(initb)), result.rows[0].scope); BOOST_REQUIRE_EQUAL(name(N(initc)), result.rows[1].scope); @@ -106,17 +106,17 @@ BOOST_FIXTURE_TEST_CASE( get_scope_test, TESTER ) try { param.limit = 1; result = plugin.read_only::get_table_by_scope(param); BOOST_REQUIRE_EQUAL(1, result.rows.size()); - BOOST_REQUIRE_EQUAL(true, result.more); + BOOST_REQUIRE_EQUAL("initc", result.more); param.table = name(0); result = plugin.read_only::get_table_by_scope(param); BOOST_REQUIRE_EQUAL(1, result.rows.size()); - BOOST_REQUIRE_EQUAL(true, result.more); + BOOST_REQUIRE_EQUAL("initc", result.more); param.table = N(invalid); result = plugin.read_only::get_table_by_scope(param); BOOST_REQUIRE_EQUAL(0, result.rows.size()); - BOOST_REQUIRE_EQUAL(false, result.more); + BOOST_REQUIRE_EQUAL("", result.more); } FC_LOG_AND_RETHROW() /// get_scope_test From ba4d5e4b236cc469aaf4ceb9340069551ee4c0ef Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 7 Sep 2018 14:52:44 -0500 Subject: [PATCH 083/194] Added command line parameter for setting trusted producers. 
GH #5268 --- libraries/chain/include/eosio/chain/controller.hpp | 2 ++ plugins/chain_plugin/chain_plugin.cpp | 3 +++ 2 files changed, 5 insertions(+) diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 21b9e5a6f9f..265b2e303fe 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -74,6 +74,8 @@ namespace eosio { namespace chain { validation_mode block_validation_mode = validation_mode::FULL; flat_set resource_greylist; + flat_set trusted_producers; + bool trusted_producer_light_validation = false; }; enum class block_status { diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 233b6048a46..6da793c3ff2 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -283,6 +283,7 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip "replace reversible block database with blocks imported from specified file and then exit") ("export-reversible-blocks", bpo::value(), "export reversible block database in portable format into specified file and then exit") + ("trusted-producer", bpo::value>()->composing(), "Indicate a producer whose blocks headers signed by it will be fully validated, but transactions in those validated blocks will be trusted.") ; } @@ -332,6 +333,8 @@ void chain_plugin::plugin_initialize(const variables_map& options) { LOAD_VALUE_SET( options, "contract-whitelist", my->chain_config->contract_whitelist ); LOAD_VALUE_SET( options, "contract-blacklist", my->chain_config->contract_blacklist ); + LOAD_VALUE_SET( options, "trusted-producer", my->chain_config->trusted_producers ); + if( options.count( "action-blacklist" )) { const std::vector& acts = options["action-blacklist"].as>(); auto& list = my->chain_config->action_blacklist; From 6adc0220e5cabaa3a6335a3aa118d78f1a1dcc9e Mon Sep 17 00:00:00 2001 From: Brian 
Johnson Date: Fri, 7 Sep 2018 14:56:29 -0500 Subject: [PATCH 084/194] Inverted check for flag for no auto keosd. GH #5199 --- programs/cleos/main.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index b829befe467..20a0823d86e 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -789,7 +789,7 @@ void try_local_port( const string& lo_address, uint16_t port, uint32_t duration } void ensure_keosd_running(CLI::App* app) { - if (!no_auto_keosd) + if (no_auto_keosd) return; // get, version, net do not require keosd if (tx_skip_sign || app->got_subcommand("get") || app->got_subcommand("version") || app->got_subcommand("net")) From 8dab36d902e6911152a4428fbfc9f0392000c6a1 Mon Sep 17 00:00:00 2001 From: Greg Lee Date: Sat, 8 Sep 2018 08:55:01 -0400 Subject: [PATCH 085/194] Spelling and whitespace corrections --- contracts/eosiolib/transaction.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/contracts/eosiolib/transaction.h b/contracts/eosiolib/transaction.h index 99e1d3c2c46..dd7c05ded17 100644 --- a/contracts/eosiolib/transaction.h +++ b/contracts/eosiolib/transaction.h @@ -13,7 +13,7 @@ extern "C" { * * * Deferred transactions will not be processed until a future block. They - * can therefore have no effect on the success of failure of their parent + * can therefore have no effect on the success or failure of their parent * transaction so long as they appear well formed. If any other condition * causes the parent transaction to be marked as failing, then the deferred * transaction will never be processed. @@ -27,7 +27,7 @@ extern "C" { * ends such that the success or failure of the parent transaction is * dependent on the success of the message. 
If an inline message fails in * processing then the whole tree of transactions and actions rooted in the - * block will me marked as failing and none of effects on the database will + * block will be marked as failing and none of effects on the database will * persist. * * Inline actions and Deferred transactions must adhere to the permissions @@ -68,7 +68,7 @@ extern "C" { * @return 1 if transaction was canceled, 0 if transaction was not found * * Example: -* + * * @code * id = 0xffffffffffffffff * cancel_deferred( id ); From b0a48f5da1f65ef215b227f3ad5924b17fe781ad Mon Sep 17 00:00:00 2001 From: Jeeyong Um Date: Sun, 9 Sep 2018 00:16:58 +0900 Subject: [PATCH 086/194] Add cleos support `open` token balance row before transfer --- programs/cleos/main.cpp | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index b829befe467..6ca23738594 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -560,6 +560,18 @@ fc::variant regproducer_variant(const account_name& producer, const public_key_t ; } +chain::action create_open(const string& contract, const name& owner, asset amount, const name& ram_payer) { + auto open_ = fc::mutable_variant_object + ("owner", owner) + ("symbol", amount.get_symbol()) + ("ram_payer", ram_payer); + + return action { + tx_permission.empty() ? 
vector{{ram_payer,config::active_name}} : get_account_permissions(tx_permission), + contract, "open", variant_to_bin( contract, N(open), open_ ) + }; +} + chain::action create_transfer(const string& contract, const name& sender, const name& recipient, asset amount, const string& memo ) { auto transfer = fc::mutable_variant_object @@ -2395,12 +2407,14 @@ int main( int argc, char** argv ) { string recipient; string amount; string memo; + bool pay_ram = false; auto transfer = app.add_subcommand("transfer", localized("Transfer EOS from account to account"), false); transfer->add_option("sender", sender, localized("The account sending EOS"))->required(); transfer->add_option("recipient", recipient, localized("The account receiving EOS"))->required(); transfer->add_option("amount", amount, localized("The amount of EOS to send"))->required(); transfer->add_option("memo", memo, localized("The memo for the transfer")); transfer->add_option("--contract,-c", con, localized("The contract which controls the token")); + transfer->add_flag("--pay-ram-to-open", pay_ram, localized("Pay ram to open recipient's token balance row")); add_standard_transaction_options(transfer, "sender@active"); transfer->set_callback([&] { @@ -2410,7 +2424,13 @@ int main( int argc, char** argv ) { tx_force_unique = false; } - send_actions({create_transfer(con, sender, recipient, to_asset(con, amount), memo)}); + auto transfer = create_transfer(con, sender, recipient, to_asset(con, amount), memo); + if (!pay_ram) { + send_actions( { transfer }); + } else { + auto open_ = create_open(con, recipient, to_asset(con, amount), sender); + send_actions( { open_, transfer } ); + } }); // Net subcommand From 000a80fa781f3e62762c7f10b10e4f3473cfc4a2 Mon Sep 17 00:00:00 2001 From: Jeeyong Um Date: Sun, 9 Sep 2018 00:37:19 +0900 Subject: [PATCH 087/194] Remove unused variable --- programs/cleos/main.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 
b829befe467..0318e9beacd 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -568,11 +568,6 @@ chain::action create_transfer(const string& contract, const name& sender, const ("quantity", amount) ("memo", memo); - auto args = fc::mutable_variant_object - ("code", contract) - ("action", "transfer") - ("args", transfer); - return action { tx_permission.empty() ? vector{{sender,config::active_name}} : get_account_permissions(tx_permission), contract, "transfer", variant_to_bin( contract, N(transfer), transfer ) From bb79184d739de1344fe1d1d242fc73897ab5f7df Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 28 Aug 2018 17:52:19 -0400 Subject: [PATCH 088/194] Build secp256k1 as an external MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bundle our secp256k1 library as an external. This removes one external dependency (making it a little easier for packaging and future updates), makes the build of secp256k1 use same compiler & flags as the main applications, and fixes a bug where the configure script on macOS wasn’t enabling x86_64 asm operations. 
--- CMakeLists.txt | 2 - CMakeModules/EosioTester.cmake.in | 1 - CMakeModules/EosioTesterBuild.cmake.in | 1 - Docker/Dockerfile | 2 +- Docker/dev/Dockerfile | 2 +- libraries/fc | 2 +- scripts/eosio_build_amazon.sh | 57 -------------------------- scripts/eosio_build_centos.sh | 57 -------------------------- scripts/eosio_build_darwin.sh | 56 ------------------------- scripts/eosio_build_fedora.sh | 57 -------------------------- scripts/eosio_build_ubuntu.sh | 57 -------------------------- 11 files changed, 3 insertions(+), 291 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 76f7afa7a91..2423dcc196e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -74,8 +74,6 @@ if ("${OPENSSL_ROOT_DIR}" STREQUAL "") endif() endif() -find_package(Secp256k1 REQUIRED) - if(UNIX) if(APPLE) set(whole_archive_flag "-force_load") diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index 0827b00d700..48dcddffa70 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -59,7 +59,6 @@ find_library(liboscrypto crypto @OPENSSL_ROOT_DIR@/lib) find_library(libosssl ssl @OPENSSL_ROOT_DIR@/lib) find_library(libchainbase chainbase @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libbuiltins builtins @CMAKE_INSTALL_FULL_LIBDIR@) -find_library(libsecp256k1 secp256k1 @Secp256k1_ROOT_DIR@/lib) macro(add_eosio_test test_name) add_executable( ${test_name} ${ARGN} ) diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index e4243aff86f..06451ff1650 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -59,7 +59,6 @@ find_library(liboscrypto crypto @OPENSSL_ROOT_DIR@/lib) find_library(libosssl ssl @OPENSSL_ROOT_DIR@/lib) find_library(libchainbase chainbase @CMAKE_BINARY_DIR@/libraries/chainbase) find_library(libbuiltins builtins @CMAKE_BINARY_DIR@/libraries/builtins) -find_library(libsecp256k1 secp256k1 @Secp256k1_ROOT_DIR@/lib) 
macro(add_eosio_test test_name) add_executable( ${test_name} ${ARGN} ) diff --git a/Docker/Dockerfile b/Docker/Dockerfile index ffde4a65c07..24dd447ed75 100644 --- a/Docker/Dockerfile +++ b/Docker/Dockerfile @@ -5,7 +5,7 @@ ARG symbol=SYS RUN git clone -b $branch https://github.com/EOSIO/eos.git --recursive \ && cd eos && echo "$branch:$(git rev-parse HEAD)" > /etc/eosio-version \ && cmake -H. -B"/tmp/build" -GNinja -DCMAKE_BUILD_TYPE=Release -DWASM_ROOT=/opt/wasm -DCMAKE_CXX_COMPILER=clang++ \ - -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/tmp/build -DSecp256k1_ROOT_DIR=/usr/local -DBUILD_MONGO_DB_PLUGIN=true -DCORE_SYMBOL_NAME=$symbol \ + -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/tmp/build -DBUILD_MONGO_DB_PLUGIN=true -DCORE_SYMBOL_NAME=$symbol \ && cmake --build /tmp/build --target install && rm /tmp/build/bin/eosiocpp diff --git a/Docker/dev/Dockerfile b/Docker/dev/Dockerfile index 7df9a182b37..f2dea74ac6c 100644 --- a/Docker/dev/Dockerfile +++ b/Docker/dev/Dockerfile @@ -5,7 +5,7 @@ ARG symbol=SYS RUN git clone -b $branch https://github.com/EOSIO/eos.git --recursive \ && cd eos && echo "$branch:$(git rev-parse HEAD)" > /etc/eosio-version \ && cmake -H. 
-B"/opt/eosio" -GNinja -DCMAKE_BUILD_TYPE=Release -DWASM_ROOT=/opt/wasm -DCMAKE_CXX_COMPILER=clang++ \ - -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/opt/eosio -DSecp256k1_ROOT_DIR=/usr/local -DBUILD_MONGO_DB_PLUGIN=true -DCORE_SYMBOL_NAME=$symbol \ + -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/opt/eosio -DBUILD_MONGO_DB_PLUGIN=true -DCORE_SYMBOL_NAME=$symbol \ && cmake --build /opt/eosio --target install \ && cp /eos/Docker/config.ini / && ln -s /opt/eosio/contracts /contracts && cp /eos/Docker/nodeosd.sh /opt/eosio/bin/nodeosd.sh && ln -s /eos/tutorials /tutorials diff --git a/libraries/fc b/libraries/fc index b9d51de0dc0..a4a4f20a9db 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit b9d51de0dc09ad5e48ef3a6179ec579b351ae6cc +Subproject commit a4a4f20a9db606319330e605cbead806f965dda5 diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index dc54cbfe0bd..4fd12dd3ad0 100644 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -528,63 +528,6 @@ fi printf "\\tMongo C++ driver found at /usr/local/lib64/libmongocxx-static.a.\\n" fi - printf "\\n\\tChecking secp256k1-zkp installation.\\n" - # install secp256k1-zkp (Cryptonomex branch) - if [ ! -e "/usr/local/lib/libsecp256k1.a" ]; then - printf "\\tInstalling secp256k1-zkp (Cryptonomex branch).\\n" - if ! cd "${TEMP_DIR}" - then - printf "\\n\\tUnable to enter directory %s.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! git clone https://github.com/cryptonomex/secp256k1-zkp.git - then - printf "\\tUnable to clone repo secp256k1-zkp @ https://github.com/cryptonomex/secp256k1-zkp.git.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\n\\tUnable to cd into directory %s/secp256k1-zkp.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! 
./autogen.sh - then - printf "\\tError running autogen for secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! ./configure - then - printf "\\tError running configure for secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! make -j"${JOBS}" - then - printf "\\tError compiling secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make install - then - printf "\\tError installing secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\tError removing directory %s/secp256k1-zkp.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\tsecp256k1 successfully installed @ /usr/local/lib/libsecp256k1.a.\\n" - else - printf "\\tsecp256k1 found @ /usr/local/lib/libsecp256k1.a.\\n" - fi - printf "\\n\\tChecking LLVM with WASM support.\\n" if [ ! -d "${HOME}/opt/wasm/bin" ]; then printf "\\tInstalling LLVM & WASM.\\n" diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index de0193a4b42..085d80839fb 100644 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -618,63 +618,6 @@ mongodconf printf "\\tMongo C++ driver found at /usr/local/lib64/libmongocxx-static.a.\\n" fi - printf "\\n\\tChecking secp256k1-zkp installation.\\n" - # install secp256k1-zkp (Cryptonomex branch) - if [ ! -e "/usr/local/lib/libsecp256k1.a" ]; then - printf "\\tInstalling secp256k1-zkp (Cryptonomex branch).\\n" - if ! cd "${TEMP_DIR}" - then - printf "\\n\\tUnable to enter directory %s.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! git clone https://github.com/cryptonomex/secp256k1-zkp.git - then - printf "\\tUnable to clone repo secp256k1-zkp @ https://github.com/cryptonomex/secp256k1-zkp.git.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! 
cd "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\n\\tUnable to cd into directory %s/secp256k1-zkp.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! ./autogen.sh - then - printf "\\tError running autogen for secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! ./configure - then - printf "\\tError running configure for secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! make -j"${JOBS}" - then - printf "\\tError compiling secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make install - then - printf "\\tError installing secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\tError removing directory %s/secp256k1-zkp.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\n\\tsecp256k1 successfully installed @ /usr/local/lib.\\n\\n" - else - printf "\\tsecp256k1 found @ /usr/local/lib.\\n" - fi - printf "\\n\\tChecking LLVM with WASM support installation.\\n" if [ ! -d "${HOME}/opt/wasm/bin" ]; then printf "\\n\\tInstalling LLVM with WASM\\n" diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index aac05df8f43..c6e28fb73f6 100644 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -396,62 +396,6 @@ printf "\\tMongo C++ driver found at /usr/local/lib/libmongocxx-static.a.\\n" fi - printf "\\n\\tChecking secp256k1-zkp installation.\\n" - # install secp256k1-zkp (Cryptonomex branch) - if [ ! -e "/usr/local/lib/libsecp256k1.a" ]; then - if ! cd "${TEMP_DIR}" - then - printf "\\tUnable to enter directory %s.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! git clone https://github.com/cryptonomex/secp256k1-zkp.git - then - printf "\\tUnable to clone repo secp256k1-zkp @ https://github.com/cryptonomex/secp256k1-zkp.git.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! 
cd "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\tUnable to enter directory %s/secp256k1-zkp.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! ./autogen.sh - then - printf "\\tError running autogen.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! ./configure - then - printf "\\tConfiguring secp256k1-zkp has returned the above error.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! make -j"${CPU_CORE}" - then - printf "\\tError compiling secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make install - then - printf "\\tInstalling secp256k1-zkp has returned the above error.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\tUnable to remove directory %s/secp256k1-zkp56k1-zkp.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\n\\n\\tSuccessffully installed secp256k1 @ /usr/local/lib/.\\n\\n" - else - printf "\\tsecp256k1 found at /usr/local/lib/.\\n" - fi - printf "\\n\\tChecking LLVM with WASM support.\\n" if [ ! -d /usr/local/wasm/bin ]; then if ! cd "${TEMP_DIR}" diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 2bfbf0ec28d..35cad3d7d8e 100644 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -393,63 +393,6 @@ printf "\\tMongo C++ driver found at /usr/local/lib64/libmongocxx-static.a.\\n" fi - printf "\\n\\tChecking secp256k1-zkp installation.\\n" - # install secp256k1-zkp (Cryptonomex branch) - if [ ! -e "/usr/local/lib/libsecp256k1.a" ]; then - printf "\\tInstalling secp256k1-zkp (Cryptonomex branch).\\n" - if ! cd "${TEMP_DIR}" - then - printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! 
git clone https://github.com/cryptonomex/secp256k1-zkp.git - then - printf "\\tUnable to clone repo secp256k1-zkp @ https://github.com/cryptonomex/secp256k1-zkp.git.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}/secp256k1-zkp" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! ./autogen.sh - then - printf "\\tError running autogen for secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! ./configure - then - printf "\\tError running configure for secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! make -j"${JOBS}" - then - printf "\\tError compiling secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make install - then - printf "\\tError installing secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\tError removing directory %s.\\n" "${TEMP_DIR}/secp256k1-zkp" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\n\\tsecp256k1 successfully installed @ /usr/local/lib.\\n\\n" - else - printf "\\tsecp256k1 found @ /usr/local/lib.\\n" - fi - printf "\\n\\tChecking LLVM with WASM support installation.\\n" if [ ! -d "${HOME}/opt/wasm/bin" ]; then printf "\\tInstalling LLVM & WASM\\n" diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index c1451a555c0..4c9873a60a1 100644 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -421,63 +421,6 @@ mongodconf printf "\\tMongo C++ driver found at /usr/local/lib/libmongocxx-static.a.\\n" fi - printf "\\n\\tChecking secp256k1-zkp installation.\\n" - # install secp256k1-zkp (Cryptonomex branch) - if [ ! -e "/usr/local/lib/libsecp256k1.a" ]; then - printf "\\tInstalling secp256k1-zkp (Cryptonomex branch).\\n" - if ! 
cd "${TEMP_DIR}" - then - printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! git clone https://github.com/cryptonomex/secp256k1-zkp.git - then - printf "\\tUnable to clone repo secp256k1-zkp @ https://github.com/cryptonomex/secp256k1-zkp.git.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}/secp256k1-zkp" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! ./autogen.sh - then - printf "\\tError running autogen for secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! ./configure - then - printf "\\tError running configure for secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! make -j"${JOBS}" - then - printf "\\tError compiling secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make install - then - printf "\\tError installing secp256k1-zkp.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/secp256k1-zkp" - then - printf "\\tError removing directory %s.\\n" "${TEMP_DIR}/secp256k1-zkp" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\n\\tsecp256k1 successfully installed @ /usr/local/lib.\\n\\n" - else - printf "\\tsecp256k1 found @ /usr/local/lib.\\n" - fi - printf "\\n\\tChecking for LLVM with WASM support.\\n" if [ ! -d "${HOME}/opt/wasm/bin" ]; then # Build LLVM and clang with WASM support: From e70e6f84ba2b86213ca36b9d240ef4ad3f481f3f Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 2 Aug 2018 13:44:02 -0500 Subject: [PATCH 089/194] Added BlockType and plumbing for methods oriented for head block number to also support LIB. 
GH #4973 --- tests/Cluster.py | 21 +++++++------ tests/Node.py | 77 ++++++++++++++++++++++++++++++------------------ 2 files changed, 61 insertions(+), 37 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index e71649c5f29..329ab902362 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -18,6 +18,7 @@ from core_symbol import CORE_SYMBOL from testUtils import Utils from testUtils import Account +from Node import BlockType from Node import Node from WalletMgr import WalletMgr @@ -308,25 +309,27 @@ def setNodes(self, nodes): """manually set nodes, alternative to explicit launch""" self.nodes=nodes - def waitOnClusterSync(self, timeout=None): + def waitOnClusterSync(self, timeout=None, blockType=BlockType.head): """Get head block on node 0, then ensure the block is present on every cluster node.""" assert(self.nodes) assert(len(self.nodes) > 0) - targetHeadBlockNum=self.nodes[0].getHeadBlockNum() #get root nodes head block num - if Utils.Debug: Utils.Print("Head block number on root node: %d" % (targetHeadBlockNum)) - if targetHeadBlockNum == -1: + node=self.nodes[0] + targetBlockNum=node.getBlockNum(blockType) #retrieve node 0's head or irrevercible block number + if Utils.Debug: + Utils.Print("%s block number on root node: %d" % (blockType.type, targetBlockNum)) + if targetBlockNum == -1: return False - return self.waitOnClusterBlockNumSync(targetHeadBlockNum, timeout) + return self.waitOnClusterBlockNumSync(targetBlockNum, timeout) - def waitOnClusterBlockNumSync(self, targetBlockNum, timeout=None): + def waitOnClusterBlockNumSync(self, targetBlockNum, timeout=None, blockType=BlockType.head): """Wait for all nodes to have targetBlockNum finalized.""" assert(self.nodes) - def doNodesHaveBlockNum(nodes, targetBlockNum): + def doNodesHaveBlockNum(nodes, targetBlockNum, blockType): for node in nodes: try: - if (not node.killed) and (not node.isBlockPresent(targetBlockNum)): + if (not node.killed) and (not node.isBlockPresent(targetBlockNum, 
blockType=blockType)): return False except (TypeError) as _: # This can happen if client connects before server is listening @@ -334,7 +337,7 @@ def doNodesHaveBlockNum(nodes, targetBlockNum): return True - lam = lambda: doNodesHaveBlockNum(self.nodes, targetBlockNum) + lam = lambda: doNodesHaveBlockNum(self.nodes, targetBlockNum, blockType) ret=Utils.waitForBool(lam, timeout) return ret diff --git a/tests/Node.py b/tests/Node.py index db90cef6d82..e9a18103470 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -11,6 +11,12 @@ from testUtils import Utils from testUtils import Account +def addEnum(enumClassType, type): + setattr(enumClassType, type, enumClassType(type)) + +def unhandledEnumType(type): + raise RuntimeError("No case defined for type=%s" % (type.type)) + class ReturnType: def __init__(self, type): @@ -19,8 +25,19 @@ def __init__(self, type): def __str__(self): return self.type -setattr(ReturnType, "raw", ReturnType("raw")) -setattr(ReturnType, "json", ReturnType("json")) +addEnum(ReturnType, "raw") +addEnum(ReturnType, "json") + +class BlockType: + + def __init__(self, type): + self.type=type + + def __str__(self): + return self.type + +addEnum(BlockType, "head") +addEnum(BlockType, "lib") # pylint: disable=too-many-public-methods class Node(object): @@ -209,43 +226,38 @@ def getBlockByIdMdb(self, blockId, silentErrors=False): return None - def isBlockPresent(self, blockNum): - """Does node have head_block_num >= blockNum""" + def isBlockPresent(self, blockNum, blockType=BlockType.head): + """Does node have head_block_num/last_irreversible_block_num >= blockNum""" assert isinstance(blockNum, int) + assert isinstance(blockType, BlockType) assert (blockNum > 0) info=self.getInfo(silentErrors=True, exitOnError=True) node_block_num=0 try: - node_block_num=int(info["head_block_num"]) + if blockType==BlockType.head: + node_block_num=int(info["head_block_num"]) + elif blockType==BlockType.lib: + node_block_num=int(info["last_irreversible_block_num"]) + else: + 
unhandledEnumType(blockType) + except (TypeError, KeyError) as _: - Utils.Print("Failure in get info parsing. %s" % (info)) + Utils.Print("Failure in get info parsing %s block. %s" % (blockType.type, info)) raise - return True if blockNum <= node_block_num else False + present = True if blockNum <= node_block_num else False + if Utils.Debug and blockType==BlockType.lib: + decorator="" + if present: + decorator="is not " + Utils.Print("Block %d is %sfinalized." % (blockNum, decorator)) + + return present def isBlockFinalized(self, blockNum): """Is blockNum finalized""" - assert(blockNum) - assert isinstance(blockNum, int) - assert (blockNum > 0) - - info=self.getInfo(silentErrors=True, exitOnError=True) - node_block_num=0 - try: - node_block_num=int(info["last_irreversible_block_num"]) - except (TypeError, KeyError) as _: - Utils.Print("Failure in get info parsing. %s" % (info)) - raise - - finalized = True if blockNum <= node_block_num else False - if Utils.Debug: - if finalized: - Utils.Print("Block %d is finalized." % (blockNum)) - else: - Utils.Print("Block %d is not yet finalized." % (blockNum)) - - return finalized + return self.isBlockPresent(blockNum, blockType=BlockType.lib) # pylint: disable=too-many-branches def getTransaction(self, transId, silentErrors=False, exitOnError=False, delayedRetry=True): @@ -411,7 +423,7 @@ def isTransFinalized(self, transId): return False assert(isinstance(blockId, int)) - return self.isBlockFinalized(blockId) + return self.isBlockPresent(blockId, blockType=BlockType.lib) # Create & initialize account and return creation transactions. 
Return transaction json object @@ -972,6 +984,15 @@ def getIrreversibleBlockNum(self): return blockNum return None + def getBlockNum(self, blockType): + assert isinstance(blockType, BlockType) + if blockType==BlockType.head: + return self.getHeadBlockNum() + elif blockType==BlockType.lib: + return self.getIrreversibleBlockNum() + else: + unhandledEnumType(blockType) + def kill(self, killSignal): if Utils.Debug: Utils.Print("Killing node: %s" % (self.cmd)) assert(self.pid is not None) From 28e84f30be8c2000d6b16caf396be531be293161 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 21 Aug 2018 07:20:44 -0500 Subject: [PATCH 090/194] Added plugin to allow shutting down node for a given producer, at a given point in its head or lib production. GH #4973 --- plugins/CMakeLists.txt | 2 + .../test_control_api_plugin/CMakeLists.txt | 7 + .../test_control_api_plugin.hpp | 34 ++++ .../test_control_api_plugin.cpp | 94 ++++++++++ plugins/test_control_plugin/CMakeLists.txt | 9 + .../test_control_plugin.hpp | 67 +++++++ .../test_control_plugin.cpp | 166 ++++++++++++++++++ programs/nodeos/CMakeLists.txt | 4 +- 8 files changed, 382 insertions(+), 1 deletion(-) create mode 100644 plugins/test_control_api_plugin/CMakeLists.txt create mode 100644 plugins/test_control_api_plugin/include/eosio/test_control_api_plugin/test_control_api_plugin.hpp create mode 100644 plugins/test_control_api_plugin/test_control_api_plugin.cpp create mode 100644 plugins/test_control_plugin/CMakeLists.txt create mode 100644 plugins/test_control_plugin/include/eosio/test_control_plugin/test_control_plugin.hpp create mode 100644 plugins/test_control_plugin/test_control_plugin.cpp diff --git a/plugins/CMakeLists.txt b/plugins/CMakeLists.txt index 06b0162fe5b..9b0b17b9d0a 100644 --- a/plugins/CMakeLists.txt +++ b/plugins/CMakeLists.txt @@ -17,6 +17,8 @@ add_subdirectory(db_size_api_plugin) #add_subdirectory(faucet_testnet_plugin) add_subdirectory(mongo_db_plugin) add_subdirectory(login_plugin) 
+add_subdirectory(test_control_plugin) +add_subdirectory(test_control_api_plugin) # Forward variables to top level so packaging picks them up set(CPACK_DEBIAN_PACKAGE_DEPENDS ${CPACK_DEBIAN_PACKAGE_DEPENDS} PARENT_SCOPE) diff --git a/plugins/test_control_api_plugin/CMakeLists.txt b/plugins/test_control_api_plugin/CMakeLists.txt new file mode 100644 index 00000000000..0a36991e90b --- /dev/null +++ b/plugins/test_control_api_plugin/CMakeLists.txt @@ -0,0 +1,7 @@ +file(GLOB HEADERS "include/eosio/test_control_api_plugin/*.hpp") +add_library( test_control_api_plugin + test_control_api_plugin.cpp + ${HEADERS} ) + +target_link_libraries( test_control_api_plugin test_control_plugin chain_plugin http_plugin appbase ) +target_include_directories( test_control_api_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/plugins/test_control_api_plugin/include/eosio/test_control_api_plugin/test_control_api_plugin.hpp b/plugins/test_control_api_plugin/include/eosio/test_control_api_plugin/test_control_api_plugin.hpp new file mode 100644 index 00000000000..feac39a95ff --- /dev/null +++ b/plugins/test_control_api_plugin/include/eosio/test_control_api_plugin/test_control_api_plugin.hpp @@ -0,0 +1,34 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ +#pragma once +#include +#include + +#include +#include + +namespace eosio { + using eosio::chain::controller; + using std::unique_ptr; + using namespace appbase; + + class test_control_api_plugin : public plugin { + public: + APPBASE_PLUGIN_REQUIRES((test_control_plugin)(chain_plugin)(http_plugin)) + + test_control_api_plugin(); + virtual ~test_control_api_plugin(); + + virtual void set_program_options(options_description&, options_description&) override; + + void plugin_initialize(const variables_map&); + void plugin_startup(); + void plugin_shutdown(); + + private: + unique_ptr my; + }; + +} diff --git a/plugins/test_control_api_plugin/test_control_api_plugin.cpp 
b/plugins/test_control_api_plugin/test_control_api_plugin.cpp new file mode 100644 index 00000000000..8bca87b2167 --- /dev/null +++ b/plugins/test_control_api_plugin/test_control_api_plugin.cpp @@ -0,0 +1,94 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ +#include +#include + +#include + +namespace eosio { + +static appbase::abstract_plugin& _test_control_api_plugin = app().register_plugin(); + +using namespace eosio; + +class test_control_api_plugin_impl { +public: + test_control_api_plugin_impl(controller& db) + : db(db) {} + + controller& db; +}; + + +test_control_api_plugin::test_control_api_plugin(){} +test_control_api_plugin::~test_control_api_plugin(){} + +void test_control_api_plugin::set_program_options(options_description&, options_description&) {} +void test_control_api_plugin::plugin_initialize(const variables_map&) {} + +struct async_result_visitor : public fc::visitor { + template + std::string operator()(const T& v) const { + return fc::json::to_string(v); + } +}; + +#define CALL(api_name, api_handle, api_namespace, call_name, http_response_code) \ +{std::string("/v1/" #api_name "/" #call_name), \ + [this, api_handle](string, string body, url_response_callback cb) mutable { \ + wlog("test_control_api_plugin CALL"); \ + try { \ + if (body.empty()) body = "{}"; \ + auto result = api_handle.call_name(fc::json::from_string(body).as()); \ + cb(http_response_code, fc::json::to_string(result)); \ + } catch (...) 
{ \ + http_plugin::handle_exception(#api_name, #call_name, body, cb); \ + } \ + }} + +#define TEST_CONTROL_RW_CALL(call_name, http_response_code) CALL(test_control, rw_api, test_control_apis::read_write, call_name, http_response_code) + +/* +#define CALL(api_name, api_handle, api_namespace, call_name, http_response_code) \ +{std::string("/v1/" #api_name "/" #call_name), \ + [this, api_handle](string, string body, url_response_callback cb) mutable { \ + try { \ + wlog("test_control_api_plugin CALL"); \ + wlog("test_control_api_plugin CALL body=${body}",("body",body)); \ + if (body.empty()) body = "{}"; \ + auto result = api_handle.call_name(fc::json::from_string(body).as()); \ + cb(http_response_code, fc::json::to_string(result)); \ + } catch (...) { \ + http_plugin::handle_exception(#api_name, #call_name, body, cb); \ + } \ + }} + +#define TEST_CONTROL_RW_CALL(call_name, http_response_code) CALL(test_control, rw_api, test_control_apis::read_write, call_name, http_response_code) +*/ +void test_control_api_plugin::plugin_startup() { + ilog( "starting test_control_api_plugin" ); +// auto& chain = app().get_plugin().chain(); + my.reset(new test_control_api_plugin_impl(app().get_plugin().chain())); + auto rw_api = app().get_plugin().get_read_write_api(); + + app().get_plugin().add_api({ +// TEST_CONTROL_RW_CALL(kill_node_on_producer, 202) + {std::string("/v1/test_control/kill_node_on_producer"), + [this, rw_api](string, string body, url_response_callback cb) mutable { + wlog("test_control_api_plugin CALL"); + try { + if (body.empty()) body = "{}"; + auto result = rw_api.kill_node_on_producer(fc::json::from_string(body).as()); + cb(202, fc::json::to_string(result)); + } catch (...) 
{ + http_plugin::handle_exception("rw_api", "kill_node_on_producer", body, cb); + } + }} + }); +} + +void test_control_api_plugin::plugin_shutdown() {} + +} diff --git a/plugins/test_control_plugin/CMakeLists.txt b/plugins/test_control_plugin/CMakeLists.txt new file mode 100644 index 00000000000..aa6b1cff397 --- /dev/null +++ b/plugins/test_control_plugin/CMakeLists.txt @@ -0,0 +1,9 @@ +file(GLOB HEADERS "include/eosio/test_control_plugin/*.hpp") + +add_library( test_control_plugin + test_control_plugin.cpp + ${HEADERS} ) + +target_link_libraries( test_control_plugin producer_plugin chain_plugin http_client_plugin appbase eosio_chain eos_utilities ) +target_include_directories( test_control_plugin + PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/plugins/test_control_plugin/include/eosio/test_control_plugin/test_control_plugin.hpp b/plugins/test_control_plugin/include/eosio/test_control_plugin/test_control_plugin.hpp new file mode 100644 index 00000000000..0a40d9b6e36 --- /dev/null +++ b/plugins/test_control_plugin/include/eosio/test_control_plugin/test_control_plugin.hpp @@ -0,0 +1,67 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ +#pragma once +#include +#include +#include +#include + +namespace fc { class variant; } + +namespace eosio { + using namespace appbase; + typedef std::shared_ptr test_control_ptr; + +namespace test_control_apis { +struct empty{}; + +class read_write { + + public: + read_write(const test_control_ptr& test_control) + : my(test_control) {} + + struct kill_node_on_producer_params { + name producer; + uint32_t where_in_sequence; + bool based_on_lib; + }; + using kill_node_on_producer_results = empty; + kill_node_on_producer_results kill_node_on_producer(const kill_node_on_producer_params& params) const; + + private: + test_control_ptr my; +}; + + +} // namespace test_control_apis + + +class test_control_plugin : public plugin { +public: + APPBASE_PLUGIN_REQUIRES((chain_plugin)) + + test_control_plugin(); + 
test_control_plugin(const test_control_plugin&) = delete; + test_control_plugin(test_control_plugin&&) = delete; + test_control_plugin& operator=(const test_control_plugin&) = delete; + test_control_plugin& operator=(test_control_plugin&&) = delete; + virtual ~test_control_plugin() override = default; + + virtual void set_program_options(options_description& cli, options_description& cfg) override; + void plugin_initialize(const variables_map& options); + void plugin_startup(); + void plugin_shutdown(); + + test_control_apis::read_write get_read_write_api() const { return test_control_apis::read_write(my); } + +private: + test_control_ptr my; +}; + +} + +FC_REFLECT(eosio::test_control_apis::empty, ) +FC_REFLECT(eosio::test_control_apis::read_write::kill_node_on_producer_params, (producer)(where_in_sequence)(based_on_lib) ) diff --git a/plugins/test_control_plugin/test_control_plugin.cpp b/plugins/test_control_plugin/test_control_plugin.cpp new file mode 100644 index 00000000000..ce34a5f5893 --- /dev/null +++ b/plugins/test_control_plugin/test_control_plugin.cpp @@ -0,0 +1,166 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ +#include +#include +#include + +namespace fc { class variant; } + +namespace eosio { + +static appbase::abstract_plugin& _test_control_plugin = app().register_plugin(); + +class test_control_plugin_impl { +public: + test_control_plugin_impl(chain::controller& c) : _chain(c) {} + void connect(); + void disconnect(); + void kill_on_lib(account_name prod, uint32_t where_in_seq); + void kill_on_head(account_name prod, uint32_t where_in_seq); + +private: + void accepted_block(const chain::block_state_ptr& bsp); + void applied_irreversible_block(const chain::block_state_ptr& bsp); + void retrieve_next_block_state(const chain::block_state_ptr& bsp); + void process_next_block_state(const chain::block_header_state& bhs); + + fc::optional _accepted_block_connection; + fc::optional _irreversible_block_connection; + chain::controller& 
_chain; + account_name _producer; + int32_t _where_in_sequence; + int32_t _producer_sequence; + bool _clean_producer_sequence; + std::atomic_bool _track_lib; + std::atomic_bool _track_head; +}; + +void test_control_plugin_impl::connect() { + wlog("test_control_plugin::kill_node_on_producer() lib"); + _irreversible_block_connection.emplace( + _chain.irreversible_block.connect( [&]( const chain::block_state_ptr& bs ) { + applied_irreversible_block( bs ); + } )); + wlog("test_control_plugin::kill_node_on_producer() head"); + _accepted_block_connection = + _chain.accepted_block.connect( [&]( const chain::block_state_ptr& bs ) { + accepted_block( bs ); + } ); + wlog("test_control_plugin::kill_node_on_producer() head connection created"); +} + +void test_control_plugin_impl::disconnect() { + _accepted_block_connection.reset(); + _irreversible_block_connection.reset(); +} + +void test_control_plugin_impl::applied_irreversible_block(const chain::block_state_ptr& bsp) { + wlog("test_control_plugin_impl::applied_irreversible_block()"); + if (_track_lib) + retrieve_next_block_state(bsp); +} + +void test_control_plugin_impl::accepted_block(const chain::block_state_ptr& bsp) { + wlog("test_control_plugin_impl::accepted_block()"); + if (_track_head) + retrieve_next_block_state(bsp); +} + +void test_control_plugin_impl::retrieve_next_block_state(const chain::block_state_ptr& bsp) { + const auto hbn = bsp->block_num; + auto new_block_header = bsp->header; + new_block_header.timestamp = new_block_header.timestamp.next(); + new_block_header.previous = bsp->id; + auto new_bs = bsp->generate_next(new_block_header.timestamp); + process_next_block_state(new_bs); +} + +void test_control_plugin_impl::process_next_block_state(const chain::block_header_state& bhs) { + const auto block_time = _chain.head_block_time() + fc::microseconds(chain::config::block_interval_us); + const auto& producer_name = bhs.get_scheduled_producer(block_time).producer_name; + // start counting sequences for this 
producer (once we + if (producer_name == _producer && _clean_producer_sequence) { + _producer_sequence += 1; + wlog("test_control_plugin_impl::process_next_block_state() seq=${seq}",("seq",_producer_sequence)); + + if (_producer_sequence >= _where_in_sequence) { + app().quit(); + } + } else if (producer_name != _producer) { + wlog("test_control_plugin_impl::process_next_block_state() reset"); + _producer_sequence = -1; + // can now guarantee we are at the start of the producer + _clean_producer_sequence = true; + } +} + +void test_control_plugin_impl::kill_on_lib(account_name prod, uint32_t where_in_seq) { + wlog("test_control_plugin_impl::kill_on_lib() 1"); + _track_head = false; + wlog("test_control_plugin_impl::kill_on_lib() 2"); + _producer = prod; + _where_in_sequence = static_cast(where_in_seq); + _producer_sequence = -1; + _clean_producer_sequence = false; + wlog("test_control_plugin_impl::kill_on_lib() 3"); + _track_lib = true; + wlog("test_control_plugin_impl::kill_on_lib() 4"); +} + +void test_control_plugin_impl::kill_on_head(account_name prod, uint32_t where_in_seq) { + wlog("test_control_plugin_impl::kill_on_head() 1"); + _track_lib = false; + wlog("test_control_plugin_impl::kill_on_head() 2"); + _producer = prod; + _where_in_sequence = static_cast(where_in_seq); + _producer_sequence = -1; + _clean_producer_sequence = false; + wlog("test_control_plugin_impl::kill_on_head() 3"); + _track_head = true; + wlog("test_control_plugin_impl::kill_on_head() 4"); +} + +test_control_plugin::test_control_plugin() +: my(new test_control_plugin_impl(app().get_plugin().chain())) +{ +} + +void test_control_plugin::set_program_options(options_description& cli, options_description& cfg) { + wlog("test_control_plugin::set_program_options()"); +} + +void test_control_plugin::plugin_initialize(const variables_map& options) { + wlog("test_control_plugin::plugin_initialize()"); +} + +void test_control_plugin::plugin_startup() { + wlog("test_control_plugin::plugin_startup()"); 
+ my->connect(); +} + +void test_control_plugin::plugin_shutdown() { + wlog("test_control_plugin::plugin_shutdown()"); + my->disconnect(); +} + +namespace test_control_apis { +read_write::kill_node_on_producer_results read_write::kill_node_on_producer(const read_write::kill_node_on_producer_params& params) const { + wlog("test_control_plugin::kill_node_on_producer() ${prod} ${where_in_seq} ${head_lib}",("prod",params.producer.to_string())("where_in_seq",params.where_in_sequence)("head_lib", (params.based_on_lib ? "LIB" : "HEAD"))); + + if (params.based_on_lib) { + wlog("test_control_plugin::kill_node_on_producer() kill_on_lib"); + my->kill_on_lib(params.producer, params.where_in_sequence); + wlog("test_control_plugin::kill_node_on_producer() kill_on_lib done"); + } else { + wlog("test_control_plugin::kill_node_on_producer() kill_on_head"); + my->kill_on_head(params.producer, params.where_in_sequence); + wlog("test_control_plugin::kill_node_on_producer() kill_on_head done"); + } + return read_write::kill_node_on_producer_results{}; +} + +} // namespace test_control_apis + +} // namespace eosio diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index 5c190f69145..82ce6470789 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -51,7 +51,7 @@ target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE appbase PRIVATE -Wl,${whole_archive_flag} login_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} history_plugin -Wl,${no_whole_archive_flag} - PRIVATE -Wl,${whole_archive_flag} bnet_plugin -Wl,${no_whole_archive_flag} + PRIVATE -Wl,${whole_archive_flag} bnet_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} history_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} chain_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} net_plugin -Wl,${no_whole_archive_flag} @@ -60,6 +60,8 @@ target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE 
-Wl,${whole_archive_flag} txn_test_gen_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} db_size_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} producer_api_plugin -Wl,${no_whole_archive_flag} + PRIVATE -Wl,${whole_archive_flag} test_control_plugin -Wl,${no_whole_archive_flag} + PRIVATE -Wl,${whole_archive_flag} test_control_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${build_id_flag} PRIVATE chain_plugin http_plugin producer_plugin http_client_plugin PRIVATE eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) From 9364f653d71eb9a3a2a589cbd6f149fc6248895c Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 21 Aug 2018 07:25:57 -0500 Subject: [PATCH 091/194] Added ability to provide command line arguments to individual nodeos instances using the node number. GH #4973 --- programs/eosio-launcher/main.cpp | 49 ++++++++++++++++++++++++++++---- tests/Cluster.py | 18 ++++++++++-- 2 files changed, 59 insertions(+), 8 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 55e96c9f1c2..155340d69bd 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -231,6 +231,9 @@ class eosd_def { return dot_label_str; } + string get_node_num() const { + return name.substr( name.length() - 2 ); + } private: string dot_label_str; }; @@ -408,6 +411,7 @@ struct launcher_def { bfs::path data_dir_base; bool skip_transaction_signatures = false; string eosd_extra_args; + std::map specific_nodeos_args; testnet_def network; string gelf_endpoint; vector aliases; @@ -463,6 +467,7 @@ struct launcher_def { void prep_remote_config_dir (eosd_def &node, host_def *host); void launch (eosd_def &node, string >s); void kill (launch_modes mode, string sig_opt); + static string get_node_num(uint16_t node_num); pair find_node(uint16_t node_num); vector> get_nodes(const string& node_number_list); void bounce (const string& node_numbers); @@ -484,7 +489,9 @@ 
launcher_def::set_options (bpo::options_description &cfg) { ("p2p-plugin", bpo::value()->default_value("net"),"select a p2p plugin to use (either net or bnet). Defaults to net.") ("genesis,g",bpo::value(&genesis)->default_value("./genesis.json"),"set the path to genesis.json") ("skip-signature", bpo::bool_switch(&skip_transaction_signatures)->default_value(false), "nodeos does not require transaction signatures.") - ("nodeos", bpo::value(&eosd_extra_args), "forward nodeos command line argument(s) to each instance of nodeos, enclose arg in quotes") + ("nodeos", bpo::value(&eosd_extra_args), "forward nodeos command line argument(s) to each instance of nodeos, enclose arg(s) in quotes") + ("specific-num", bpo::value>()->composing(), "forward nodeos command line argument(s) (using \"--specific-nodeos\" flag) to this specific instance of nodeos. This parameter can be entered multiple times and requires a paired \"--specific-nodeos\" flag") + ("specific-nodeos", bpo::value>()->composing(), "forward nodeos command line argument(s) to its paired specific instance of nodeos(using \"--specific-num\"), enclose arg(s) in quotes") ("delay,d",bpo::value(&start_delay)->default_value(0),"seconds delay before starting each node after the first") ("boot",bpo::bool_switch(&boot)->default_value(false),"After deploying the nodes and generating a boot script, invoke it.") ("nogen",bpo::bool_switch(&nogen)->default_value(false),"launch nodes without writing new config files") @@ -527,6 +534,25 @@ launcher_def::initialize (const variables_map &vmap) { } } + if (vmap.count("specific-num")) { + const auto specific_nums = vmap["specific-num"].as>(); + const auto specific_args = vmap["specific-nodeos"].as>(); + if (specific_nums.size() != specific_args.size()) { + cerr << "ERROR: every specific-num argument must be paired with a specific-nodeos argument" << endl; + exit (-1); + } + const auto total_nodes = vmap["nodes"].as(); + for(uint i = 0; i < specific_nums.size(); ++i) + { + const auto& 
num = specific_nums[i]; + if (num >= total_nodes) { + cerr << "\"--specific-num\" provided value= " << num << " is higher than \"--nodes\" provided value=" << total_nodes << endl; + exit (-1); + } + specific_nodeos_args[num] = specific_args[i]; + } + } + using namespace std::chrono; system_clock::time_point now = system_clock::now(); std::time_t now_c = system_clock::to_time_t(now); @@ -1500,6 +1526,12 @@ launcher_def::launch (eosd_def &instance, string >s) { eosdcmd += eosd_extra_args + " "; } } + if (instance.name != "bios" && !specific_nodeos_args.empty()) { + const auto node_num = boost::lexical_cast(instance.get_node_num()); + if (specific_nodeos_args.count(node_num)) { + eosdcmd += specific_nodeos_args[node_num] + " "; + } + } if( add_enable_stale_production ) { eosdcmd += "--enable-stale-production true "; @@ -1597,11 +1629,16 @@ launcher_def::kill (launch_modes mode, string sig_opt) { } } +string +launcher_def::get_node_num(uint16_t node_num) { + string node_num_str = node_num < 10 ? "0":""; + node_num_str += boost::lexical_cast(node_num); + return node_num_str; +} + pair launcher_def::find_node(uint16_t node_num) { - string dex = node_num < 10 ? 
"0":""; - dex += boost::lexical_cast(node_num); - string node_name = network.name + dex; + const string node_name = network.name + get_node_num(node_num); for (const auto& host: bindings) { for (const auto& node: host.instances) { if (node_name == node.name) { @@ -1675,7 +1712,7 @@ launcher_def::bounce (const string& node_numbers) { for (auto node_pair: node_list) { const host_def& host = node_pair.first; const eosd_def& node = node_pair.second; - string node_num = node.name.substr( node.name.length() - 2 ); + const string node_num = node.get_node_num(); cout << "Bouncing " << node.name << endl; string cmd = "./scripts/eosio-tn_bounce.sh " + eosd_extra_args; do_command(host, node.name, { { "EOSIO_HOME", host.eosio_home }, { "EOSIO_NODE", node_num } }, cmd); @@ -1688,7 +1725,7 @@ launcher_def::down (const string& node_numbers) { for (auto node_pair: node_list) { const host_def& host = node_pair.first; const eosd_def& node = node_pair.second; - string node_num = node.name.substr( node.name.length() - 2 ); + const string node_num = node.get_node_num(); cout << "Taking down " << node.name << endl; string cmd = "./scripts/eosio-tn_down.sh "; do_command(host, node.name, diff --git a/tests/Cluster.py b/tests/Cluster.py index 329ab902362..91c8966e1ee 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -99,7 +99,7 @@ def setWalletMgr(self, walletMgr): # pylint: disable=too-many-branches # pylint: disable=too-many-statements def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontKill=False - , dontBootstrap=False, totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True): + , dontBootstrap=False, totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None): """Launch cluster. 
pnodes: producer nodes count totalNodes: producer + non-producer nodes count @@ -107,11 +107,14 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne topo: cluster topology (as defined by launcher) delay: delay between individual nodes launch (as defined by launcher) delay 0 exposes a bootstrap bug where producer handover may have a large gap confusing nodes and bringing system to a halt. - dontBootstrap: When true, don't do any bootstrapping at all. onlyBios: When true, only loads the bios contract (and not more full bootstrapping). + dontBootstrap: When true, don't do any bootstrapping at all. + extraNodeosArgs: string of arguments to pass through to each nodeos instance (via --nodeos flag on launcher) useBiosBootFile: determines which of two bootstrap methods is used (when both dontBootstrap and onlyBios are false). The default value of true uses the bios_boot.sh file generated by the launcher. A value of false uses manual bootstrapping in this script, which does not do things like stake votes for producers. + specificExtraNodeosArgs: dictionary of arguments to pass to a specific node (via --specific-num and + --specific-nodeos flags on launcher), example: { "5" : "--plugin eosio::test_control_api_plugin" } """ if not self.localCluster: Utils.Print("WARNING: Cluster not local, not launching %s." 
% (Utils.EosServerName)) @@ -151,7 +154,18 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne cmdArr.append("--nodeos") cmdArr.append(nodeosArgs) + if specificExtraNodeosArgs is not None: + assert(isinstance(specificExtraNodeosArgs, dict)) + for nodeNum,arg in specificExtraNodeosArgs.items(): + assert(isinstance(nodeNum, (str,int))) + assert(isinstance(arg, str)) + cmdArr.append("--specific-num") + cmdArr.append(str(nodeNum)) + cmdArr.append("--specific-nodeos") + cmdArr.append(arg) + Cluster.__LauncherCmdArr = cmdArr.copy() + s=" ".join(cmdArr) if Utils.Debug: Utils.Print("cmd: %s" % (s)) if 0 != subprocess.call(cmdArr): From 46f34137f8ec11439b93f7203e5b779b8f4d3dc2 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 21 Aug 2018 07:27:16 -0500 Subject: [PATCH 092/194] Added integration test for forked chains. GH #4973 --- tests/CMakeLists.txt | 4 + tests/nodeos_forked_chain_test.py | 175 ++++++++++++++++++++++++++++++ 2 files changed, 179 insertions(+) create mode 100755 tests/nodeos_forked_chain_test.py diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 059d42a0856..ff05bad19c2 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -34,6 +34,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-test.py ${CM configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-remote-test.py ${CMAKE_CURRENT_BINARY_DIR}/distributed-transactions-remote-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/sample-cluster-map.json ${CMAKE_CURRENT_BINARY_DIR}/sample-cluster-map.json COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/restart-scenarios-test.py ${CMAKE_CURRENT_BINARY_DIR}/restart-scenarios-test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_forked_chain_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_forked_chain_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_test.py COPYONLY) 
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_remote_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_remote_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_under_min_avail_ram.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_under_min_avail_ram.py COPYONLY) @@ -71,6 +72,9 @@ set_property(TEST bnet_nodeos_sanity_lr_test PROPERTY LABELS long_running_tests) #add_test(NAME distributed_transactions_lr_test COMMAND tests/distributed-transactions-test.py -d 2 -p 21 -n 21 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) #set_property(TEST distributed_transactions_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_forked_chain_lr_test COMMAND tests/nodeos_forked_chain_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_forked_chain_lr_test PROPERTY LABELS long_running_tests) + add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_voting_lr_test PROPERTY LABELS long_running_tests) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py new file mode 100755 index 00000000000..9dc52c0d843 --- /dev/null +++ b/tests/nodeos_forked_chain_test.py @@ -0,0 +1,175 @@ +#!/usr/bin/env python3 + +from testUtils import Utils +import testUtils +from Cluster import Cluster +from WalletMgr import WalletMgr +from Node import Node +from TestHelper import AppArgs +from TestHelper import TestHelper + +import decimal +import math +import re + +############################################################### +# nodeos_voting_test +# --dump-error-details +# --keep-logs +############################################################### +Print=Utils.Print +errorExit=Utils.errorExit + +from core_symbol import CORE_SYMBOL + +args = 
TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--p2p-plugin"}) +Utils.Debug=args.v +totalProducerNodes=21 +totalNonProducerNodes=1 +totalNodes=totalProducerNodes+totalNonProducerNodes +cluster=Cluster(walletd=True) +dumpErrorDetails=args.dump_error_details +keepLogs=args.keep_logs +dontKill=args.leave_running +prodCount=args.prod_count +killAll=args.clean_run +p2pPlugin=args.p2p_plugin + +walletMgr=WalletMgr(True) +testSuccessful=False +killEosInstances=not dontKill +killWallet=not dontKill + +WalletdName="keosd" +ClientName="cleos" + +try: + TestHelper.printSystemInfo("BEGIN") + + cluster.killall(allInstances=killAll) + cluster.cleanup() + Print("Stand up cluster") + specificExtraNodeosArgs={} + # producer nodes will be mapped to 0 through totalProducerNodes-1, so totalProducerNodes will be the non-producing node + specificExtraNodeosArgs[totalProducerNodes]="--plugin eosio::test_control_api_plugin" + if cluster.launch(prodCount=prodCount, onlyBios=False, dontKill=dontKill, pnodes=totalProducerNodes, totalNodes=totalNodes, + totalProducers=totalProducerNodes, p2pPlugin=p2pPlugin, specificExtraNodeosArgs=specificExtraNodeosArgs) is False: + Utils.cmdError("launcher") + errorExit("Failed to stand up eos cluster.") + + Print("Validating system accounts after bootstrap") + cluster.validateAccounts(None) + + accounts=cluster.createAccountKeys(5) + if accounts is None: + errorExit("FAILURE - create keys") + accounts[0].name="tester111111" + accounts[1].name="tester222222" + accounts[2].name="tester333333" + accounts[3].name="tester444444" + accounts[4].name="tester555555" + + testWalletName="test" + + Print("Creating wallet \"%s\"." 
% (testWalletName)) + walletMgr.killall(allInstances=killAll) + walletMgr.cleanup() + if walletMgr.launch() is False: + Utils.cmdError("%s" % (WalletdName)) + errorExit("Failed to stand up eos walletd.") + + testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4]]) + + for _, account in cluster.defProducerAccounts.items(): + walletMgr.importKey(account, testWallet, ignoreDupKeyWarning=True) + + Print("Wallet \"%s\" password=%s." % (testWalletName, testWallet.password.encode("utf-8"))) + + nonProdNode=None + prodNodes=[] + producers={} + for i in range(0, totalNodes): + node=cluster.getNode(i) + node.producers=Cluster.parseProducers(i) + numProducers=len(node.producers) + if numProducers==1: + prod=node.producers[0] + trans=node.regproducer(cluster.defProducerAccounts[prod], "http::/mysite.com", 0, waitForTransBlock=False, exitOnError=True) + prodNodes.append(node) + producers[prod]=node + elif numProducers==0: + if nonProdNode is None: + nonProdNode=node + else: + errorExit("More than one non-producing nodes") + else: + errorExit("Producing node should have 1 producer, it has %d" % (numProducers)) + + node=prodNodes[0] + # create accounts via eosio as otherwise a bid is needed + for account in accounts: + Print("Create new account %s via %s" % (account.name, cluster.eosioAccount.name)) + trans=node.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True) + transferAmount="100000000.0000 {0}".format(CORE_SYMBOL) + Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name)) + node.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer") + trans=node.delegatebw(account, 20000000.0000, 20000000.0000, exitOnError=True) + + #verify nodes are in sync and advancing + cluster.waitOnClusterSync(blockAdvancing=5) + index=0 + for account in 
accounts: + trans=prodNodes[index].vote(account, producers) + index+=1 + + #verify nodes are in sync and advancing + cluster.waitOnClusterSync(blockAdvancing=5) + blockNum=node.getNextCleanProductionCycle(trans) + blockProducer=node.getBlockProducer(blockNum) + Utils.Print("Validating blockNum=%s, producer=%s" % (blockNum, blockProducer)) + + lastBlockProducer=blockProducer + while blockProducer==lastBlockProducer: + blockNum+=1 + blockProducer=node.getBlockProducer(blockNum) + + productionCycle=[] + producerToSlot={} + slot=-1 + expectedCount=12 + while True: + if blockProducer not in producers: + errorExit("Producer %s was not one of the voted on producers" % blockProducer) + + productionCycle.append(blockProducer) + slot+=1 + if blockProducer in producerToSlot: + errorExit("Producer %s was first seen in slot %d, but is repeated in slot %d" % (blockProducer, producerToSlot[blockProducer], slot)) + + producerToSlot[blockProducer]={"slot":slot, "count":0} + lastBlockProducer=blockProducer + while blockProducer==lastBlockProducer: + producerToSlot[blockProducer]["count"]+=1 + blockNum+=1 + blockProducer=node.getBlockProducer(blockNum) + + if producerToSlot[lastBlockProducer]["count"]!=expectedCount: + errorExit("Producer %s, in slot %d, expected to produce %d blocks but produced %d blocks" % (blockProducer, expectedCount, producerToSlot[lastBlockProducer]["count"])) + + if blockProducer==productionCycle[0]: + break + + output=None + for blockProducer in productionCycle: + if output is None: + output="" + else: + output+=", " + output+=blockProducer+":"+str(producerToSlot[blockProducer]["count"]) + Utils.Print("ProductionCycle ->> {\n%s\n}" % output) + + testSuccessful=True +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + +exit(0) From 82252f16180461543dcc0e56b659c6b017270d11 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 21 Aug 2018 07:28:19 -0500 Subject: [PATCH 
093/194] Miscellaneous test fixes. GH #4973 --- tests/distributed-transactions-test.py | 5 +++-- tests/nodeos_under_min_avail_ram.py | 2 +- tests/p2p_network_test.py | 1 + 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/distributed-transactions-test.py b/tests/distributed-transactions-test.py index 0ed76f6a956..f83cb3aa0e7 100755 --- a/tests/distributed-transactions-test.py +++ b/tests/distributed-transactions-test.py @@ -11,7 +11,7 @@ errorExit=Utils.errorExit args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file","--seed" - ,"--dump-error-details","-v","--leave-running","--clean-run"}) + ,"--dump-error-details","-v","--leave-running","--clean-run","--keep-logs"}) pnodes=args.p topo=args.s @@ -23,6 +23,7 @@ dontKill=args.leave_running dumpErrorDetails=args.dump_error_details killAll=args.clean_run +keepLogs=args.keep_logs killWallet=not dontKill killEosInstances=not dontKill @@ -97,6 +98,6 @@ testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, False, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) exit(0) diff --git a/tests/nodeos_under_min_avail_ram.py b/tests/nodeos_under_min_avail_ram.py index d06c1fe9de9..c615b4fbb38 100755 --- a/tests/nodeos_under_min_avail_ram.py +++ b/tests/nodeos_under_min_avail_ram.py @@ -144,7 +144,7 @@ def setName(self, num): Print("Publish contract") trans=nodes[0].publishContract(contractAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) if trans is None: - cmdError("%s set contract %s" % (ClientName, contractAccount.name)) + Utils.cmdError("%s set contract %s" % (ClientName, contractAccount.name)) errorExit("Failed to publish contract.") contract=contractAccount.name diff --git a/tests/p2p_network_test.py b/tests/p2p_network_test.py index bdd7ed16f00..49bd746e4e9 100755 --- a/tests/p2p_network_test.py +++ 
b/tests/p2p_network_test.py @@ -23,6 +23,7 @@ parser = argparse.ArgumentParser(add_help=False) Print=testUtils.Utils.Print +cmdError=Utils.cmdError errorExit=Utils.errorExit # Override default help argument so that only --help (and not -h) can call help From 8ff0eb72fe65db9b6a913d0d6bf8c51c713eb829 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 21 Aug 2018 07:29:49 -0500 Subject: [PATCH 094/194] Refactored out common voting methods from voting test to Node.py. GH #4973 --- tests/Node.py | 36 ++++++++++++++++++++ tests/nodeos_voting_test.py | 68 +++++++------------------------------ 2 files changed, 48 insertions(+), 56 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index e9a18103470..491b0b45ef8 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1036,6 +1036,42 @@ def verifyAlive(self, silent=False): else: return True + + def getBlockProducer(self, blockNum, timeout=None, waitForBlock=True, exitOnError=True): + if waitForBlock: + self.waitForBlock(blockNum, timeout=timeout) + block=self.getBlock(blockNum, exitOnError=exitOnError) + blockProducer=block["producer"] + if blockProducer is None and exitOnError: + Utils.cmdError("could not get producer for block number %s" % (blockNum)) + errorExit("Failed to get block's producer") + return blockProducer + + def getNextCleanProductionCycle(self, trans): + transId=Node.getTransId(trans) + rounds=21*12*2 # max time to ensure that at least 2/3+1 of producers x blocks per producer x at least 2 times + self.waitForTransFinalization(transId, timeout=rounds/2) + irreversibleBlockNum=self.getIrreversibleBlockNum() + + # The voted schedule should be promoted now, then need to wait for that to become irreversible + votingTallyWindow=120 #could be up to 120 blocks before the votes were tallied + promotedBlockNum=self.getHeadBlockNum()+votingTallyWindow + self.waitForIrreversibleBlock(promotedBlockNum, timeout=rounds/2) + + ibnSchedActive=self.getIrreversibleBlockNum() + + blockNum=self.getHeadBlockNum() + 
Utils.Print("Searching for clean production cycle blockNum=%s ibn=%s transId=%s promoted bn=%s ibn for schedule active=%s" % (blockNum,irreversibleBlockNum,transId,promotedBlockNum,ibnSchedActive)) + blockProducer=self.getBlockProducer(blockNum) + blockNum+=1 + Utils.Print("Advance until the next block producer is retrieved") + while blockProducer == self.getBlockProducer(blockNum): + blockNum+=1 + + blockProducer=self.getBlockProducer(blockNum) + return blockNum + + # TBD: make nodeId an internal property # pylint: disable=too-many-locals def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None): diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py index b560795b6a7..58e482395c1 100755 --- a/tests/nodeos_voting_test.py +++ b/tests/nodeos_voting_test.py @@ -25,34 +25,14 @@ def populate(node, num): ProducerToNode.map[prod]=num Utils.Print("Producer=%s for nodeNum=%s" % (prod,num)) -def vote(node, account, producers): - Print("Votes for %s" % (account.name)) - trans=node.vote(account, producers, waitForTransBlock=False, exitOnError=True) - return trans - -def getBlockProducer(node, blockNum): - node.waitForBlock(blockNum) - block=node.getBlock(blockNum, exitOnError=True) - blockProducer=block["producer"] - if blockProducer is None: - Utils.cmdError("could not get producer for block number %s" % (blockNum)) - Utils.errorExit("Failed to get block's producer") - return blockProducer - -def getNodeNum(cluster, node): - for i in range(0, 4): - if node == cluster.getNode(i): - return i - return -1 - def isValidBlockProducer(prodsActive, blockNum, node): - blockProducer=getBlockProducer(node, blockNum) + blockProducer=node.getBlockProducer(blockNum) if blockProducer not in prodsActive: return False return prodsActive[blockProducer] def validBlockProducer(prodsActive, prodsSeen, blockNum, node): - blockProducer=getBlockProducer(node, blockNum) + blockProducer=node.getBlockProducer(blockNum) if blockProducer 
not in prodsActive: Utils.cmdError("unexpected block producer %s at blockNum=%s" % (blockProducer,blockNum)) Utils.errorExit("Failed because of invalid block producer") @@ -61,47 +41,23 @@ def validBlockProducer(prodsActive, prodsSeen, blockNum, node): Utils.errorExit("Failed because of incorrect block producer") prodsSeen[blockProducer]=True -def getNextCleanProductionCycle(trans, node): - transId=Node.getTransId(trans) - rounds=21*12*2 # max time to ensure that at least 2/3+1 of producers x blocks per producer x at least 2 times - node.waitForTransFinalization(transId, timeout=rounds/2) - irreversibleBlockNum=node.getIrreversibleBlockNum() - - # The voted schedule should be promoted now, then need to wait for that to become irreversible - votingTallyWindow=120 #could be up to 120 blocks before the votes were tallied - promotedBlockNum=node.getHeadBlockNum()+votingTallyWindow - node.waitForIrreversibleBlock(promotedBlockNum, timeout=rounds/2) - - ibnSchedActive=node.getIrreversibleBlockNum() - - blockNum=node.getHeadBlockNum() - Utils.Print("Searching for clean production cycle blockNum=%s ibn=%s transId=%s promoted bn=%s ibn for schedule active=%s" % (blockNum,irreversibleBlockNum,transId,promotedBlockNum,ibnSchedActive)) - blockProducer=getBlockProducer(node, blockNum) - blockNum+=1 - Utils.Print("Advance until the next block producer is retrieved") - while blockProducer == getBlockProducer(node, blockNum): - blockNum+=1 - - blockProducer=getBlockProducer(node, blockNum) - return blockNum - def setActiveProducers(prodsActive, activeProducers): for prod in prodsActive: prodsActive[prod]=prod in activeProducers def verifyProductionRounds(trans, node, prodsActive, rounds): - blockNum=getNextCleanProductionCycle(trans, node) + blockNum=node.getNextCleanProductionCycle(trans) Utils.Print("Validating blockNum=%s" % (blockNum)) temp=Utils.Debug Utils.Debug=False Utils.Print("FIND VALID BLOCK PRODUCER") - blockProducer=getBlockProducer(node, blockNum) + 
blockProducer=node.getBlockProducer(blockNum) lastBlockProducer=blockProducer adjust=False while not isValidBlockProducer(prodsActive, blockNum, node): adjust=True - blockProducer=getBlockProducer(node, blockNum) + blockProducer=node.getBlockProducer(blockNum) if lastBlockProducer!=blockProducer: Utils.Print("blockProducer=%s for blockNum=%s is for node=%s" % (blockProducer, blockNum, ProducerToNode.map[blockProducer])) lastBlockProducer=blockProducer @@ -132,7 +88,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): Utils.Print("saw=%s, blockProducer=%s, blockNum=%s" % (saw,blockProducer,blockNum)) lastBlockProducer=blockProducer saw=1 - blockProducer=getBlockProducer(node, blockNum) + blockProducer=node.getBlockProducer(blockNum) blockNum+=1 if adjust: @@ -147,22 +103,22 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): lastBlockProducer=None for j in range(0, 21): # each new set of 12 blocks should have a different blockProducer - if lastBlockProducer is not None and lastBlockProducer==getBlockProducer(node, blockNum): + if lastBlockProducer is not None and lastBlockProducer==node.getBlockProducer(blockNum): Utils.cmdError("expected blockNum %s to be produced by any of the valid producers except %s" % (blockNum, lastBlockProducer)) Utils.errorExit("Failed because of incorrect block producer order") # make sure that the next set of 12 blocks all have the same blockProducer - lastBlockProducer=getBlockProducer(node, blockNum) + lastBlockProducer=node.getBlockProducer(blockNum) for k in range(0, 12): validBlockProducer(prodsActive, prodsSeen, blockNum, node1) - blockProducer=getBlockProducer(node, blockNum) + blockProducer=node.getBlockProducer(blockNum) if lastBlockProducer!=blockProducer: printStr="" newBlockNum=blockNum-18 for l in range(0,36): printStr+="%s" % (newBlockNum) printStr+=":" - newBlockProducer=getBlockProducer(node, newBlockNum) + newBlockProducer=node.getBlockProducer(newBlockNum) printStr+="%s" % (newBlockProducer) 
printStr+=" " newBlockNum+=1 @@ -273,7 +229,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): #first account will vote for node0 producers, all others will vote for node1 producers node=node0 for account in accounts: - trans=vote(node, account, node.producers) + trans=node.vote(account, node.producers) node=node1 setActiveProducers(prodsActive, node1.producers) @@ -284,7 +240,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): # first account will vote for node2 producers, all others will vote for node3 producers node1 for account in accounts: - trans=vote(node, account, node.producers) + trans=node.vote(account, node.producers) node=node2 setActiveProducers(prodsActive, node2.producers) From 8c9349dc1c009fa2c26348daa5d6d18faf4f4316 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 21 Aug 2018 07:31:04 -0500 Subject: [PATCH 095/194] Added class for providing arguments that are not common to the whole integration test suite. GH #4973 --- tests/TestHelper.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/tests/TestHelper.py b/tests/TestHelper.py index e521eb3e696..a1d1fd1b6eb 100644 --- a/tests/TestHelper.py +++ b/tests/TestHelper.py @@ -6,6 +6,23 @@ import argparse +class AppArgs: + def __init__(self): + self.args=[] + + class AppArg: + def __init__(self, flag, type, help, default, choices=None): + self.flag=flag + self.type=type + self.help=help + self.default=default + self.choices=choices + + def add(self, flag, type, help, default, choices=None): + arg=self.AppArg(flag, type, help, default, choices) + self.args.append(arg) + Utils.Print("args %d" % (len(self.args))) + # pylint: disable=too-many-instance-attributes class TestHelper(object): LOCAL_HOST="localhost" @@ -14,10 +31,12 @@ class TestHelper(object): @staticmethod # pylint: disable=too-many-branches # pylint: disable=too-many-statements - def parse_args(includeArgs): + def parse_args(includeArgs, 
applicationSpecificArgs=AppArgs()): """Accepts set of arguments, builds argument parser and returns parse_args() output.""" assert(includeArgs) assert(isinstance(includeArgs, set)) + assert(isinstance(applicationSpecificArgs, AppArgs)) + Utils.Print("applicationSpecificArgs %d" % (len(applicationSpecificArgs.args))) parser = argparse.ArgumentParser(add_help=False) parser.add_argument('-?', action='help', default=argparse.SUPPRESS, @@ -82,6 +101,9 @@ def parse_args(includeArgs): if "--sanity-test" in includeArgs: parser.add_argument("--sanity-test", help="Validates nodeos and kleos are in path and can be started up.", action='store_true') + for arg in applicationSpecificArgs.args: + parser.add_argument(arg.flag, type=arg.type, help=arg.help, choices=arg.choices, default=arg.default) + args = parser.parse_args() return args From aee0cd71b3240c40f5ff16ab7be9b584fcaa0c18 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 21 Aug 2018 09:05:17 -0500 Subject: [PATCH 096/194] Fixes for supporting a large number of producer nodes. 
GH #4973 --- tests/Cluster.py | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 91c8966e1ee..ee07cd6b0de 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -203,7 +203,7 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne Utils.Print("Bootstrap cluster.") if onlyBios or not useBiosBootFile: - self.biosNode=Cluster.bootstrap(totalNodes, prodCount, Cluster.__BiosHost, Cluster.__BiosPort, dontKill, onlyBios) + self.biosNode=Cluster.bootstrap(totalNodes, prodCount, totalProducers, Cluster.__BiosHost, Cluster.__BiosPort, dontKill, onlyBios) if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") return False @@ -323,12 +323,14 @@ def setNodes(self, nodes): """manually set nodes, alternative to explicit launch""" self.nodes=nodes - def waitOnClusterSync(self, timeout=None, blockType=BlockType.head): - """Get head block on node 0, then ensure the block is present on every cluster node.""" + def waitOnClusterSync(self, timeout=None, blockType=BlockType.head, blockAdvancing=0): + """Get head or irrevercible block on node 0, then ensure that block (or that block plus the + blockAdvancing) is present on every cluster node.""" assert(self.nodes) assert(len(self.nodes) > 0) node=self.nodes[0] - targetBlockNum=node.getBlockNum(blockType) #retrieve node 0's head or irrevercible block number + targetBlockNum=node.getBlockNum(blockType) #retrieve node 0's head or irrevercible block number + targetBlockNum+=blockAdvancing if Utils.Debug: Utils.Print("%s block number on root node: %d" % (blockType.type, targetBlockNum)) if targetBlockNum == -1: @@ -649,6 +651,7 @@ def parseProducerKeys(configFile, nodeName): pattern=r"^\s*private-key\s*=\W+(\w+)\W+(\w+)\W+$" m=re.search(pattern, configStr, re.MULTILINE) + regMsg="None" if m is None else "NOT None" if m is None: if Utils.Debug: Utils.Print("Failed to find producer keys") return None @@ -710,6 +713,7 @@ 
def parseClusterKeys(totalNodes): keys=Cluster.parseProducerKeys(configFile, node) if keys is not None: producerKeys.update(keys) + keyMsg="None" if keys is None else len(keys) return producerKeys @@ -803,7 +807,7 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, dontKill=False): return biosNode @staticmethod - def bootstrap(totalNodes, prodCount, biosHost, biosPort, dontKill=False, onlyBios=False): + def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, dontKill=False, onlyBios=False): """Create 'prodCount' init accounts and deposits 10000000000 SYS in each. If prodCount is -1 will initialize all possible producers. Ensure nodes are inter-connected prior to this call. One way to validate this will be to check if every node has block 1.""" @@ -815,8 +819,11 @@ def bootstrap(totalNodes, prodCount, biosHost, biosPort, dontKill=False, onlyBio producerKeys=Cluster.parseClusterKeys(totalNodes) # should have totalNodes node plus bios node - if producerKeys is None or len(producerKeys) < (totalNodes+1): - Utils.Print("ERROR: Failed to parse private keys from cluster config files.") + if producerKeys is None or len(producerKeys) < (totalProducers+1): + if producerKeys is None: + Utils.Print("ERROR: Failed to parse any producer keys from config files.") + else: + Utils.Print("ERROR: Failed to parse %d producer keys from cluster config files, only found %d." % (totalNodes+1,len(producerKeys))) return None walletMgr=WalletMgr(True) @@ -1092,7 +1099,11 @@ def myFunc(): Utils.Print("ERROR: No nodes discovered.") return nodes - if Utils.Debug: Utils.Print("pgrep output: \"%s\"" % psOut) + if len(psOut) < 6660: + psOutDisplay=psOut + else: + psOutDisplay=psOut[:6660]+"..." 
+ if Utils.Debug: Utils.Print("pgrep output: \"%s\"" % psOutDisplay) for i in range(0, totalNodes): pattern=r"[\n]?(\d+) (.* --data-dir var/lib/node_%02d .*)\n" % (i) m=re.search(pattern, psOut, re.MULTILINE) @@ -1104,6 +1115,7 @@ def myFunc(): if Utils.Debug: Utils.Print("Node>", instance) nodes.append(instance) + if Utils.Debug: Utils.Print("Found %d nodes" % (len(nodes))) return nodes # Kills a percentange of Eos instances starting from the tail and update eosInstanceInfos state From 71474cd870b286f2aa0b5b20b1fa5f7a5ece83ce Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 21 Aug 2018 13:31:19 -0500 Subject: [PATCH 097/194] Fix error from change to support over 21 nodes. GH #4973 --- tests/Cluster.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/Cluster.py b/tests/Cluster.py index ee07cd6b0de..27f9913deb5 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -812,6 +812,9 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, dontKil Ensure nodes are inter-connected prior to this call. One way to validate this will be to check if every node has block 1.""" Utils.Print("Starting cluster bootstrap.") + if totalProducers is None: + totalProducers=totalNodes + biosNode=Node(biosHost, biosPort) if not biosNode.checkPulse(): Utils.Print("ERROR: Bios node doesn't appear to be running...") From 866953a37fa6f67d4964b854a9276066597595a5 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 21 Aug 2018 13:31:57 -0500 Subject: [PATCH 098/194] Refactored enum class into testUtils.py. 
GH #4973 --- tests/Node.py | 27 +++++++-------------------- tests/testUtils.py | 14 ++++++++++++++ 2 files changed, 21 insertions(+), 20 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 491b0b45ef8..f89484a1c37 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -10,31 +10,18 @@ from core_symbol import CORE_SYMBOL from testUtils import Utils from testUtils import Account +from testUtils import EnumType +from testUtils import addEnum +from testUtils import unhandledEnumType -def addEnum(enumClassType, type): - setattr(enumClassType, type, enumClassType(type)) - -def unhandledEnumType(type): - raise RuntimeError("No case defined for type=%s" % (type.type)) - -class ReturnType: - - def __init__(self, type): - self.type=type - - def __str__(self): - return self.type +class ReturnType(EnumType): + pass addEnum(ReturnType, "raw") addEnum(ReturnType, "json") -class BlockType: - - def __init__(self, type): - self.type=type - - def __str__(self): - return self.type +class BlockType(EnumType): + pass addEnum(BlockType, "head") addEnum(BlockType, "lib") diff --git a/tests/testUtils.py b/tests/testUtils.py index d2e3d5be9f3..40613de02ba 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -188,3 +188,17 @@ def __str__(self): return "Name: %s" % (self.name) ########################################################################################### + +def addEnum(enumClassType, type): + setattr(enumClassType, type, enumClassType(type)) + +def unhandledEnumType(type): + raise RuntimeError("No case defined for type=%s" % (type.type)) + +class EnumType: + + def __init__(self, type): + self.type=type + + def __str__(self): + return self.type From aba214a8db1dd45c04059288f2786b08930c3197 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 27 Aug 2018 07:50:28 -0500 Subject: [PATCH 099/194] Added logic to scripts to create a bridge node between 2 sides of a testnet. 
GH #4973 --- tests/Cluster.py | 103 +++++++++++++++++++++++++++++- tests/Node.py | 84 +++++++++++++++++------- tests/nodeos_forked_chain_test.py | 46 +++++++++---- 3 files changed, 197 insertions(+), 36 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 27f9913deb5..2c6d1823920 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -104,7 +104,7 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne pnodes: producer nodes count totalNodes: producer + non-producer nodes count prodCount: producers per producer node count - topo: cluster topology (as defined by launcher) + topo: cluster topology (as defined by launcher, and "bridge" shape that is specific to this launch method) delay: delay between individual nodes launch (as defined by launcher) delay 0 exposes a bootstrap bug where producer handover may have a large gap confusing nodes and bringing system to a halt. onlyBios: When true, only loads the bios contract (and not more full bootstrapping). @@ -116,6 +116,8 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne specificExtraNodeosArgs: dictionary of arguments to pass to a specific node (via --specific-num and --specific-nodeos flags on launcher), example: { "5" : "--plugin eosio::test_control_api_plugin" } """ + assert(isinstance(topo, str)) + if not self.localCluster: Utils.Print("WARNING: Cluster not local, not launching %s." 
% (Utils.EosServerName)) return True @@ -132,8 +134,8 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne Utils.Print("ERROR: Another process is listening on nodeos default port.") return False - cmd="%s -p %s -n %s -s %s -d %s -i %s -f --p2p-plugin %s %s" % ( - Utils.EosLauncherPath, pnodes, totalNodes, topo, delay, datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3], + cmd="%s -p %s -n %s -d %s -i %s -f --p2p-plugin %s %s" % ( + Utils.EosLauncherPath, pnodes, totalNodes, delay, datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3], p2pPlugin, producerFlag) cmdArr=cmd.split() if self.staging: @@ -164,6 +166,101 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne cmdArr.append("--specific-nodeos") cmdArr.append(arg) + shapeFilePrefix="shape_bridge" + shapeFile=shapeFilePrefix+".json" + shapeHostsFile=shapeFilePrefix+"_hosts.json" + # must be last cmdArr.append before subprocess.call, so that everything is on the command line + # before constructing the shape.json file for "bridge" + if topo=="bridge": + numProducers=totalProducers if totalProducers is not None else totalNodes + maxProducers=ord('z')-ord('a')+1 + assert numProducers, ] with first being the name and second being the node definition + shapeFileNodes = shapeFileObject["nodes"] + # will make a map to node object to make identification easier + biosNodeObject=None + bridgeNodes={} + producerNodes={} + producers={} + index=0 + for append in range(ord('a'),ord('a')+numProducers): + name="defproducer" + chr(append) + producers[name]=index + index+=1 + + # first group starts at 0 + secondGroupStart=int((numProducers+1)/2) + producerGroup1=[] + producerGroup2=[] + + Utils.Print("producers=%s" % (producers)) + shapeFileNodeMap = {} + def getNodeNum(nodeName): + p=re.compile(r'^testnet_(\d+)$') + m=p.match(nodeName) + return int(m.group(1)) + + for shapeFileNodePair in shapeFileNodes: + assert(len(shapeFileNodePair)==2) + 
nodeName=shapeFileNodePair[0] + shapeFileNode=shapeFileNodePair[1] + shapeFileNodeMap[nodeName]=shapeFileNode + Utils.Print("name=%s, shapeFileNode=%s" % (nodeName, shapeFileNodeMap[shapeFileNodePair[0]])) + if nodeName=="bios": + biosNodeObject=shapeFileNode + continue + nodeNum=getNodeNum(nodeName) + Utils.Print("nodeNum=%d, shapeFileNode=%s" % (nodeNum, shapeFileNode)) + assert("producers" in shapeFileNode) + numNodeProducers=len(shapeFileNode["producers"]) + if (numNodeProducers==0): + bridgeNodes[nodeName]=shapeFileNode + else: + producerNodes[nodeName]=shapeFileNode + if nodeNum 0: @@ -437,7 +438,7 @@ def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTran cmd="%s -j %s %s %s %s" % ( cmdDesc, creatorAccount.name, account.name, account.ownerPublicKey, account.activePublicKey) msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); - trans=self.processCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) transId=Node.getTransId(trans) if stakedDeposit > 0: @@ -453,7 +454,7 @@ def getEosAccount(self, name, exitOnError=False): cmdDesc="get account" cmd="%s -j %s" % (cmdDesc, name) msg="( getEosAccount(name=%s) )" % (name); - return self.processCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) + return self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) else: return self.getEosAccountFromDb(name, exitOnError=exitOnError) @@ -477,7 +478,7 @@ def getTable(self, contract, scope, table, exitOnError=False): cmdDesc = "get table" cmd="%s %s %s %s" % (cmdDesc, contract, scope, table) msg="contract=%s, scope=%s, table=%s" % (contract, scope, table); - return self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) def getTableAccountBalance(self, 
contract, scope): assert(isinstance(contract, str)) @@ -501,7 +502,7 @@ def getCurrencyBalance(self, contract, account, symbol=CORE_SYMBOL, exitOnError= cmdDesc = "get currency balance" cmd="%s %s %s %s" % (cmdDesc, contract, account, symbol) msg="contract=%s, account=%s, symbol=%s" % (contract, account, symbol); - return self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg, returnType=ReturnType.raw) + return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg, returnType=ReturnType.raw) def getCurrencyStats(self, contract, symbol=CORE_SYMBOL, exitOnError=False): """returns Json output from get currency stats.""" @@ -512,7 +513,7 @@ def getCurrencyStats(self, contract, symbol=CORE_SYMBOL, exitOnError=False): cmdDesc = "get currency stats" cmd="%s %s %s" % (cmdDesc, contract, symbol) msg="contract=%s, symbol=%s" % (contract, symbol); - return self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) # Verifies account. 
Returns "get account" json return object def verifyAccount(self, account): @@ -672,7 +673,7 @@ def getAccountsByKey(self, key, exitOnError=False): cmdDesc = "get accounts" cmd="%s %s" % (cmdDesc, key) msg="key=%s" % (key); - return self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) # Get actions mapped to an account (cleos get actions) def getActions(self, account, pos=-1, offset=-1, exitOnError=False): @@ -684,7 +685,7 @@ def getActions(self, account, pos=-1, offset=-1, exitOnError=False): cmdDesc = "get actions" cmd="%s -j %s %d %d" % (cmdDesc, account.name, pos, offset) msg="account=%s, pos=%d, offset=%d" % (account.name, pos, offset); - return self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) else: return self.getActionsMdb(account, pos, offset, exitOnError=exitOnError) @@ -722,7 +723,7 @@ def getServants(self, name, exitOnError=False): cmdDesc = "get servants" cmd="%s %s" % (cmdDesc, name) msg="name=%s" % (name); - return self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) def getServantsArr(self, name): trans=self.getServants(name, exitOnError=True) @@ -838,7 +839,7 @@ def pushMessage(self, account, action, data, opts, silentErrors=False): def setPermission(self, account, code, pType, requirement, waitForTransBlock=False, exitOnError=False): cmdDesc="set action permission" cmd="%s -j %s %s %s %s" % (cmdDesc, account, code, pType, requirement) - trans=self.processCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError) + trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -851,7 +852,7 @@ def delegatebw(self, fromAccount, netQuantity, 
cpuQuantity, toAccount=None, tran cmd="%s -j %s %s \"%s %s\" \"%s %s\" %s" % ( cmdDesc, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL, transferStr) msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name); - trans=self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -860,7 +861,7 @@ def regproducer(self, producer, url, location, waitForTransBlock=False, exitOnEr cmd="%s -j %s %s %s %s" % ( cmdDesc, producer.name, producer.activePublicKey, url, location) msg="producer=%s" % (producer.name); - trans=self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -869,11 +870,11 @@ def vote(self, account, producers, waitForTransBlock=False, exitOnError=False): cmd="%s -j %s %s" % ( cmdDesc, account.name, " ".join(producers)) msg="account=%s, producers=[ %s ]" % (account.name, ", ".join(producers)); - trans=self.processCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) - def processCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): + def processCleosCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): assert(isinstance(returnType, ReturnType)) cmd="%s %s %s" % (Utils.EosClientPath, self.eosClientArgs(), cmd) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) @@ -883,10 +884,49 @@ def processCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, exitMsg 
trans=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors) elif returnType==ReturnType.raw: trans=Utils.runCmdReturnStr(cmd) + else: + unhandledEnumType(returnType) + except subprocess.CalledProcessError as ex: + if not silentErrors: + msg=ex.output.decode("utf-8") + errorMsg="Exception during \"%s\". %s" % (cmdDesc, msg) + if exitOnError: + Utils.cmdError(errorMsg) + Utils.errorExit(errorMsg) + else: + Utils.Print("ERROR: %s" % (errorMsg)) + return None + + if exitMsg is not None: + exitMsg=": " + exitMsg + else: + exitMsg="" + if exitOnError and trans is None: + Utils.cmdError("could not \"%s\" - %s" % (cmdDesc,exitMsg)) + errorExit("Failed to \"%s\"" % (cmdDesc)) + + return trans + + def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): + assert(isinstance(producer, str)) + assert(isinstance(whereInSequence, int)) + assert(isinstance(blockType, BlockType)) + assert(isinstance(returnType, ReturnType)) + basedOnLib=True if blockType==BlockType.lib else False + cmd="curl %s -d '{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":%s }' -X POST -H \"Content-Type: application/json\"" % (self.endpointHttp, producer, whereInSequence, basedOnLib) + if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) + trans=None + try: + if returnType==ReturnType.json: + trans=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors) + elif returnType==ReturnType.raw: + trans=Utils.runCmdReturnStr(cmd) + else: + unhandledEnumType(returnType) except subprocess.CalledProcessError as ex: if not silentErrors: msg=ex.output.decode("utf-8") - errorMsg="Exception during %s. %s" % (cmdDesc, msg) + errorMsg="Exception during \"%s\". 
%s" % (cmd, msg) if exitOnError: Utils.cmdError(errorMsg) Utils.errorExit(errorMsg) @@ -899,8 +939,8 @@ def processCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, exitMsg else: exitMsg="" if exitOnError and trans is None: - Utils.cmdError("could not %s - %s" % (cmdDesc,exitMsg)) - Utils.errorExit("Failed to %s" % (cmdDesc)) + Utils.cmdError("could not \"%s\" - %s" % (cmd,exitMsg)) + Utils.errorExit("Failed to \"%s\"" % (cmd)) return trans @@ -918,7 +958,7 @@ def waitForTransBlockIfNeeded(self, trans, waitForTransBlock, exitOnError=False) def getInfo(self, silentErrors=False, exitOnError=False): cmdDesc = "get info" - info=self.processCmd(cmdDesc, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError) + info=self.processCleosCmd(cmdDesc, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError) if info is None: self.infoValid=False else: diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 9dc52c0d843..c01a0b82e43 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -2,6 +2,7 @@ from testUtils import Utils import testUtils +import time from Cluster import Cluster from WalletMgr import WalletMgr from Node import Node @@ -18,7 +19,6 @@ # --keep-logs ############################################################### Print=Utils.Print -errorExit=Utils.errorExit from core_symbol import CORE_SYMBOL @@ -52,17 +52,21 @@ specificExtraNodeosArgs={} # producer nodes will be mapped to 0 through totalProducerNodes-1, so totalProducerNodes will be the non-producing node specificExtraNodeosArgs[totalProducerNodes]="--plugin eosio::test_control_api_plugin" - if cluster.launch(prodCount=prodCount, onlyBios=False, dontKill=dontKill, pnodes=totalProducerNodes, totalNodes=totalNodes, - totalProducers=totalProducerNodes, p2pPlugin=p2pPlugin, specificExtraNodeosArgs=specificExtraNodeosArgs) is False: + if cluster.launch(prodCount=prodCount, onlyBios=False, dontKill=dontKill, topo="bridge", 
pnodes=totalProducerNodes, + totalNodes=totalNodes, totalProducers=totalProducerNodes, p2pPlugin=p2pPlugin, + specificExtraNodeosArgs=specificExtraNodeosArgs) is False: Utils.cmdError("launcher") - errorExit("Failed to stand up eos cluster.") + Utils.errorExit("Failed to stand up eos cluster.") + + # "bridge" shape connects defprocera through defproducerk to each other and defproducerl through defproduceru and the only + # connection between those 2 groups is through the bridge node Print("Validating system accounts after bootstrap") cluster.validateAccounts(None) accounts=cluster.createAccountKeys(5) if accounts is None: - errorExit("FAILURE - create keys") + Utils.errorExit("FAILURE - create keys") accounts[0].name="tester111111" accounts[1].name="tester222222" accounts[2].name="tester333333" @@ -76,7 +80,7 @@ walletMgr.cleanup() if walletMgr.launch() is False: Utils.cmdError("%s" % (WalletdName)) - errorExit("Failed to stand up eos walletd.") + Utils.errorExit("Failed to stand up eos walletd.") testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4]]) @@ -101,9 +105,9 @@ if nonProdNode is None: nonProdNode=node else: - errorExit("More than one non-producing nodes") + Utils.errorExit("More than one non-producing nodes") else: - errorExit("Producing node should have 1 producer, it has %d" % (numProducers)) + Utils.errorExit("Producing node should have 1 producer, it has %d" % (numProducers)) node=prodNodes[0] # create accounts via eosio as otherwise a bid is needed @@ -139,12 +143,12 @@ expectedCount=12 while True: if blockProducer not in producers: - errorExit("Producer %s was not one of the voted on producers" % blockProducer) + Utils.errorExit("Producer %s was not one of the voted on producers" % blockProducer) productionCycle.append(blockProducer) slot+=1 if blockProducer in producerToSlot: - errorExit("Producer %s was first seen in slot %d, but is repeated in slot %d" % (blockProducer, 
producerToSlot[blockProducer], slot)) + Utils.errorExit("Producer %s was first seen in slot %d, but is repeated in slot %d" % (blockProducer, producerToSlot[blockProducer], slot)) producerToSlot[blockProducer]={"slot":slot, "count":0} lastBlockProducer=blockProducer @@ -154,7 +158,7 @@ blockProducer=node.getBlockProducer(blockNum) if producerToSlot[lastBlockProducer]["count"]!=expectedCount: - errorExit("Producer %s, in slot %d, expected to produce %d blocks but produced %d blocks" % (blockProducer, expectedCount, producerToSlot[lastBlockProducer]["count"])) + Utils.errorExit("Producer %s, in slot %d, expected to produce %d blocks but produced %d blocks" % (blockProducer, expectedCount, producerToSlot[lastBlockProducer]["count"])) if blockProducer==productionCycle[0]: break @@ -168,6 +172,26 @@ output+=blockProducer+":"+str(producerToSlot[blockProducer]["count"]) Utils.Print("ProductionCycle ->> {\n%s\n}" % output) + for prodNode in prodNodes: + prodNode.getInfo() + + cluster.reportStatus() + + Utils.Print("Sending command to kill \"bridge\" node to separate the 2 producer groups.") + nonProdNode.killNodeOnProducer(producer="defproducerl", whereInSequence=0) + + for prodNode in prodNodes: + prodNode.getInfo() + + cluster.reportStatus() + + time.sleep(60) + assert(not nonProdNode.verifyAlive()) + for prodNode in prodNodes: + prodNode.getInfo() + + cluster.reportStatus() + testSuccessful=True finally: TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) From 6768c3fd08f305cdded62edea2fc5fa94fd222f7 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 4 Sep 2018 15:20:13 -0500 Subject: [PATCH 100/194] Cleaning up producer_name method to be static. 
GH #4973 --- programs/eosio-launcher/main.cpp | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 155340d69bd..159117ecd36 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -349,8 +349,7 @@ enum allowed_connection : char { class producer_names { public: - producer_names(int total_producers); - string producer_name(unsigned int producer_number) const; + static string producer_name(unsigned int producer_number); private: static const int total_chars = 12; static const char slot_chars[]; @@ -362,11 +361,7 @@ const char producer_names::valid_char_range = sizeof(producer_names::slot_chars) // for 26 or fewer total producers create "defproducera" .. "defproducerz" // above 26 produce "defproducera" .. "defproducerz", "defproduceaa" .. "defproducerb", etc. -producer_names::producer_names(int total_producers) -{ -} - -string producer_names::producer_name(unsigned int producer_number) const { +string producer_names::producer_name(unsigned int producer_number) { // keeping legacy "defproducer[a-z]", but if greater than valid_char_range, will use "defpraaaaaaa" char prod_name[] = "defproducera"; if (producer_number > valid_char_range) { @@ -834,7 +829,6 @@ launcher_def::bind_nodes () { cerr << "Unable to allocate producers due to insufficient prod_nodes = " << prod_nodes << "\n"; exit (10); } - producer_names names(producers); int non_bios = prod_nodes - 1; int per_node = producers / non_bios; int extra = producers % non_bios; @@ -864,7 +858,7 @@ launcher_def::bind_nodes () { } char ext = i; while (count--) { - const auto prodname = names.producer_name(ext); + const auto prodname = producer_names::producer_name(ext); node.producers.push_back(prodname); producer_set.schedule.push_back({prodname,pubkey}); ext += non_bios; From 00d4d562cd99f70a598cb5d9c30739ca6815ca5a Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 4 Sep 2018 15:24:44 
-0500 Subject: [PATCH 101/194] Changed launcher to assign producer names in order inside a producing node before proceeding to the next node, instead of round robin assigning names among producing nodes. GH #4973 --- programs/eosio-launcher/main.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 159117ecd36..6654b581649 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -833,6 +833,7 @@ launcher_def::bind_nodes () { int per_node = producers / non_bios; int extra = producers % non_bios; unsigned int i = 0; + unsigned int producer_number = 0; for (auto &h : bindings) { for (auto &inst : h.instances) { bool is_bios = inst.name == "bios"; @@ -856,12 +857,11 @@ launcher_def::bind_nodes () { ++count; --extra; } - char ext = i; while (count--) { - const auto prodname = producer_names::producer_name(ext); + const auto prodname = producer_names::producer_name(producer_number); node.producers.push_back(prodname); producer_set.schedule.push_back({prodname,pubkey}); - ext += non_bios; + ++producer_number; } } } From 1fe7ebf036056a75c1147aa032da048628a542e2 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 4 Sep 2018 15:49:43 -0500 Subject: [PATCH 102/194] Changed to Node.getBlockProducerByNum to be clearer and added plumbing to make methods that were designed around head to also be able to use LIB. 
GH #4973 --- tests/Node.py | 27 ++++++++++++--------------- tests/nodeos_voting_test.py | 18 +++++++++--------- 2 files changed, 21 insertions(+), 24 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 31d49cc4cd3..c0bba562e16 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -555,21 +555,19 @@ def waitForTransFinalization(self, transId, timeout=None): ret=Utils.waitForBool(lam, timeout) return ret - def waitForNextBlock(self, timeout=None): - num=self.getHeadBlockNum() + def waitForNextBlock(self, timeout=None, blockType=BlockType.head): + num=self.getBlockNum(blockType=blockType) lam = lambda: self.getHeadBlockNum() > num ret=Utils.waitForBool(lam, timeout) return ret - def waitForBlock(self, blockNum, timeout=None): - lam = lambda: self.getHeadBlockNum() > blockNum + def waitForBlock(self, blockNum, timeout=None, blockType=BlockType.head): + lam = lambda: self.getBlockNum(blockType=blockType) > blockNum ret=Utils.waitForBool(lam, timeout) return ret - def waitForIrreversibleBlock(self, blockNum, timeout=None): - lam = lambda: self.getIrreversibleBlockNum() >= blockNum - ret=Utils.waitForBool(lam, timeout) - return ret + def waitForIrreversibleBlock(self, blockNum, timeout=None, blockType=BlockType.head): + return self.waitForBlock(blockNum, timeout=timeout, blockType=blockType) # Trasfer funds. 
Returns "transfer" json return object def transferFunds(self, source, destination, amountStr, memo="memo", force=False, waitForTransBlock=False, exitOnError=True): @@ -1011,7 +1009,7 @@ def getIrreversibleBlockNum(self): return blockNum return None - def getBlockNum(self, blockType): + def getBlockNum(self, blockType=BlockType.head): assert isinstance(blockType, BlockType) if blockType==BlockType.head: return self.getHeadBlockNum() @@ -1063,10 +1061,9 @@ def verifyAlive(self, silent=False): else: return True - - def getBlockProducer(self, blockNum, timeout=None, waitForBlock=True, exitOnError=True): + def getBlockProducerByNum(self, blockNum, timeout=None, waitForBlock=True, exitOnError=True): if waitForBlock: - self.waitForBlock(blockNum, timeout=timeout) + self.waitForBlock(blockNum, timeout=timeout, blockType=BlockType.head) block=self.getBlock(blockNum, exitOnError=exitOnError) blockProducer=block["producer"] if blockProducer is None and exitOnError: @@ -1089,13 +1086,13 @@ def getNextCleanProductionCycle(self, trans): blockNum=self.getHeadBlockNum() Utils.Print("Searching for clean production cycle blockNum=%s ibn=%s transId=%s promoted bn=%s ibn for schedule active=%s" % (blockNum,irreversibleBlockNum,transId,promotedBlockNum,ibnSchedActive)) - blockProducer=self.getBlockProducer(blockNum) + blockProducer=self.getBlockProducerByNum(blockNum) blockNum+=1 Utils.Print("Advance until the next block producer is retrieved") - while blockProducer == self.getBlockProducer(blockNum): + while blockProducer == self.getBlockProducerByNum(blockNum): blockNum+=1 - blockProducer=self.getBlockProducer(blockNum) + blockProducer=self.getBlockProducerByNum(blockNum) return blockNum diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py index 58e482395c1..b6f176af8c9 100755 --- a/tests/nodeos_voting_test.py +++ b/tests/nodeos_voting_test.py @@ -26,13 +26,13 @@ def populate(node, num): Utils.Print("Producer=%s for nodeNum=%s" % (prod,num)) def 
isValidBlockProducer(prodsActive, blockNum, node): - blockProducer=node.getBlockProducer(blockNum) + blockProducer=node.getBlockProducerByNum(blockNum) if blockProducer not in prodsActive: return False return prodsActive[blockProducer] def validBlockProducer(prodsActive, prodsSeen, blockNum, node): - blockProducer=node.getBlockProducer(blockNum) + blockProducer=node.getBlockProducerByNum(blockNum) if blockProducer not in prodsActive: Utils.cmdError("unexpected block producer %s at blockNum=%s" % (blockProducer,blockNum)) Utils.errorExit("Failed because of invalid block producer") @@ -52,12 +52,12 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): temp=Utils.Debug Utils.Debug=False Utils.Print("FIND VALID BLOCK PRODUCER") - blockProducer=node.getBlockProducer(blockNum) + blockProducer=node.getBlockProducerByNum(blockNum) lastBlockProducer=blockProducer adjust=False while not isValidBlockProducer(prodsActive, blockNum, node): adjust=True - blockProducer=node.getBlockProducer(blockNum) + blockProducer=node.getBlockProducerByNum(blockNum) if lastBlockProducer!=blockProducer: Utils.Print("blockProducer=%s for blockNum=%s is for node=%s" % (blockProducer, blockNum, ProducerToNode.map[blockProducer])) lastBlockProducer=blockProducer @@ -88,7 +88,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): Utils.Print("saw=%s, blockProducer=%s, blockNum=%s" % (saw,blockProducer,blockNum)) lastBlockProducer=blockProducer saw=1 - blockProducer=node.getBlockProducer(blockNum) + blockProducer=node.getBlockProducerByNum(blockNum) blockNum+=1 if adjust: @@ -103,22 +103,22 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): lastBlockProducer=None for j in range(0, 21): # each new set of 12 blocks should have a different blockProducer - if lastBlockProducer is not None and lastBlockProducer==node.getBlockProducer(blockNum): + if lastBlockProducer is not None and lastBlockProducer==node.getBlockProducerByNum(blockNum): Utils.cmdError("expected 
blockNum %s to be produced by any of the valid producers except %s" % (blockNum, lastBlockProducer)) Utils.errorExit("Failed because of incorrect block producer order") # make sure that the next set of 12 blocks all have the same blockProducer - lastBlockProducer=node.getBlockProducer(blockNum) + lastBlockProducer=node.getBlockProducerByNum(blockNum) for k in range(0, 12): validBlockProducer(prodsActive, prodsSeen, blockNum, node1) - blockProducer=node.getBlockProducer(blockNum) + blockProducer=node.getBlockProducerByNum(blockNum) if lastBlockProducer!=blockProducer: printStr="" newBlockNum=blockNum-18 for l in range(0,36): printStr+="%s" % (newBlockNum) printStr+=":" - newBlockProducer=node.getBlockProducer(newBlockNum) + newBlockProducer=node.getBlockProducerByNum(newBlockNum) printStr+="%s" % (newBlockProducer) printStr+=" " newBlockNum+=1 From c42410f3f608115a3118df4e55fe93b682249d35 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 4 Sep 2018 15:50:08 -0500 Subject: [PATCH 103/194] Cleaning up log statements. 
GH #4973 --- .../test_control_plugin/test_control_plugin.cpp | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/plugins/test_control_plugin/test_control_plugin.cpp b/plugins/test_control_plugin/test_control_plugin.cpp index ce34a5f5893..7b63acac1f0 100644 --- a/plugins/test_control_plugin/test_control_plugin.cpp +++ b/plugins/test_control_plugin/test_control_plugin.cpp @@ -38,17 +38,14 @@ class test_control_plugin_impl { }; void test_control_plugin_impl::connect() { - wlog("test_control_plugin::kill_node_on_producer() lib"); _irreversible_block_connection.emplace( _chain.irreversible_block.connect( [&]( const chain::block_state_ptr& bs ) { applied_irreversible_block( bs ); } )); - wlog("test_control_plugin::kill_node_on_producer() head"); _accepted_block_connection = _chain.accepted_block.connect( [&]( const chain::block_state_ptr& bs ) { accepted_block( bs ); } ); - wlog("test_control_plugin::kill_node_on_producer() head connection created"); } void test_control_plugin_impl::disconnect() { @@ -57,13 +54,11 @@ void test_control_plugin_impl::disconnect() { } void test_control_plugin_impl::applied_irreversible_block(const chain::block_state_ptr& bsp) { - wlog("test_control_plugin_impl::applied_irreversible_block()"); if (_track_lib) retrieve_next_block_state(bsp); } void test_control_plugin_impl::accepted_block(const chain::block_state_ptr& bsp) { - wlog("test_control_plugin_impl::accepted_block()"); if (_track_head) retrieve_next_block_state(bsp); } @@ -97,29 +92,21 @@ void test_control_plugin_impl::process_next_block_state(const chain::block_heade } void test_control_plugin_impl::kill_on_lib(account_name prod, uint32_t where_in_seq) { - wlog("test_control_plugin_impl::kill_on_lib() 1"); _track_head = false; - wlog("test_control_plugin_impl::kill_on_lib() 2"); _producer = prod; _where_in_sequence = static_cast(where_in_seq); _producer_sequence = -1; _clean_producer_sequence = false; - wlog("test_control_plugin_impl::kill_on_lib() 3"); _track_lib = 
true; - wlog("test_control_plugin_impl::kill_on_lib() 4"); } void test_control_plugin_impl::kill_on_head(account_name prod, uint32_t where_in_seq) { - wlog("test_control_plugin_impl::kill_on_head() 1"); _track_lib = false; - wlog("test_control_plugin_impl::kill_on_head() 2"); _producer = prod; _where_in_sequence = static_cast(where_in_seq); _producer_sequence = -1; _clean_producer_sequence = false; - wlog("test_control_plugin_impl::kill_on_head() 3"); _track_head = true; - wlog("test_control_plugin_impl::kill_on_head() 4"); } test_control_plugin::test_control_plugin() @@ -128,20 +115,16 @@ test_control_plugin::test_control_plugin() } void test_control_plugin::set_program_options(options_description& cli, options_description& cfg) { - wlog("test_control_plugin::set_program_options()"); } void test_control_plugin::plugin_initialize(const variables_map& options) { - wlog("test_control_plugin::plugin_initialize()"); } void test_control_plugin::plugin_startup() { - wlog("test_control_plugin::plugin_startup()"); my->connect(); } void test_control_plugin::plugin_shutdown() { - wlog("test_control_plugin::plugin_shutdown()"); my->disconnect(); } From 7d575b824d197295a2a54473d1a93d27d6a44614 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 4 Sep 2018 15:52:39 -0500 Subject: [PATCH 104/194] Fixed bug in curl call to kill_node_on_producer. 
GH #4973 --- tests/Node.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index c0bba562e16..9b29ba20f94 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -910,8 +910,9 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head assert(isinstance(whereInSequence, int)) assert(isinstance(blockType, BlockType)) assert(isinstance(returnType, ReturnType)) - basedOnLib=True if blockType==BlockType.lib else False - cmd="curl %s -d '{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":%s }' -X POST -H \"Content-Type: application/json\"" % (self.endpointHttp, producer, whereInSequence, basedOnLib) + basedOnLib="true" if blockType==BlockType.lib else "false" + cmd="curl %s/v1/test_control/kill_node_on_producer -d '{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":\"%s\" }' -X POST -H \"Content-Type: application/json\"" % \ + (self.endpointHttp, producer, whereInSequence, basedOnLib) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) trans=None try: From 9b153f776952c7f8eb39204dc029e31e7dc29838 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 4 Sep 2018 15:53:31 -0500 Subject: [PATCH 105/194] Added getBlockProducer method to retrieve block producer for head or LIB block. 
GH #4973 --- tests/Node.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tests/Node.py b/tests/Node.py index 9b29ba20f94..8a8946c9c71 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -36,6 +36,7 @@ def __init__(self, host, port, pid=None, cmd=None, enableMongo=False, mongoHost= self.port=port self.pid=pid self.cmd=cmd + if Utils.Debug: Utils.Print("new Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) self.killed=False # marks node as killed self.enableMongo=enableMongo self.mongoHost=mongoHost @@ -1072,6 +1073,15 @@ def getBlockProducerByNum(self, blockNum, timeout=None, waitForBlock=True, exitO errorExit("Failed to get block's producer") return blockProducer + def getBlockProducer(self, timeout=None, waitForBlock=True, exitOnError=True, blockType=BlockType.head): + blockNum=self.getBlockNum(blockType=blockType) + block=self.getBlock(blockNum, exitOnError=exitOnError, blockType=blockType) + blockProducer=block["producer"] + if blockProducer is None and exitOnError: + Utils.cmdError("could not get producer for block number %s" % (blockNum)) + errorExit("Failed to get block's producer") + return blockProducer + def getNextCleanProductionCycle(self, trans): transId=Node.getTransId(trans) rounds=21*12*2 # max time to ensure that at least 2/3+1 of producers x blocks per producer x at least 2 times @@ -1147,6 +1157,7 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim Utils.Print("cmd: %s" % (cmd)) popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr) self.pid=popen.pid + if Utils.Debug: Utils.Print("restart Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) def isNodeAlive(): """wait for node to be responsive.""" From d3c9b7d5a55b976b38e2ca73291803f46b5fe2a7 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 4 Sep 2018 16:00:54 -0500 Subject: [PATCH 106/194] Cleanup error handling. 
GH #4973 --- tests/Cluster.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 2c6d1823920..6762e276620 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -919,11 +919,11 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, dontKil producerKeys=Cluster.parseClusterKeys(totalNodes) # should have totalNodes node plus bios node - if producerKeys is None or len(producerKeys) < (totalProducers+1): - if producerKeys is None: - Utils.Print("ERROR: Failed to parse any producer keys from config files.") - else: - Utils.Print("ERROR: Failed to parse %d producer keys from cluster config files, only found %d." % (totalNodes+1,len(producerKeys))) + if producerKeys is None: + Utils.Print("ERROR: Failed to parse any producer keys from config files.") + return None + elif len(producerKeys) < (totalProducers+1): + Utils.Print("ERROR: Failed to parse %d producer keys from cluster config files, only found %d." % (totalProducers+1,len(producerKeys))) return None walletMgr=WalletMgr(True) From b0d694c805e10160e0e1594b08d6771017f477bd Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 4 Sep 2018 16:02:50 -0500 Subject: [PATCH 107/194] Added determining the pid for the bios node. 
GH #4973 --- tests/Cluster.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/Cluster.py b/tests/Cluster.py index 6762e276620..2d7f5d3cbdc 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -310,6 +310,8 @@ def getNodeNum(nodeName): Utils.Print("ERROR: Bootstrap failed.") return False + self.discoverBiosNodePid() + # validate iniX accounts can be retrieved producerKeys=Cluster.parseClusterKeys(totalNodes) @@ -1218,6 +1220,16 @@ def myFunc(): if Utils.Debug: Utils.Print("Found %d nodes" % (len(nodes))) return nodes + def discoverBiosNodePid(self, timeout=None): + psOut=Cluster.pgrepEosServers(timeout=timeout) + pattern=Cluster.pgrepEosServerPattern("bios") + Utils.Print("pattern={\n%s\n}, psOut=\n%s\n" % (pattern,psOut)) + m=re.search(pattern, psOut, re.MULTILINE) + if m is None: + Utils.Print("ERROR: Failed to find %s pid. Pattern %s" % (Utils.EosServerName, pattern)) + else: + self.biosNode.pid=int(m.group(1)) + # Kills a percentange of Eos instances starting from the tail and update eosInstanceInfos state def killSomeEosInstances(self, killCount, killSignalStr=Utils.SigKillTag): killSignal=signal.SIGKILL From ed314d8bba90f9fcd2abf48b4b6ec6c50142b764 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 4 Sep 2018 16:06:02 -0500 Subject: [PATCH 108/194] Refactored out reusable methods out of discoverLocalNodes. 
GH #4973 --- tests/Cluster.py | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 2d7f5d3cbdc..cd7c48fea7a 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -1172,11 +1172,8 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, dontKil return biosNode - - # Populates list of EosInstanceInfo objects, matched to actual running instances - def discoverLocalNodes(self, totalNodes, timeout=0): - nodes=[] - + @staticmethod + def pgrepEosServers(timeout=None): pgrepOpts="-fl" # pylint: disable=deprecated-method if platform.linux_distribution()[0] in ["Ubuntu", "LinuxMint", "Fedora","CentOS Linux","arch"]: @@ -1196,7 +1193,21 @@ def myFunc(): return None return None - psOut=Utils.waitForObj(myFunc, timeout) + return Utils.waitForObj(myFunc, timeout) + + @staticmethod + def pgrepEosServerPattern(nodeInstance): + if isinstance(nodeInstance, str): + return r"[\n]?(\d+) (.* --data-dir var/lib/node_%s .*)\n" % nodeInstance + else: + nodeInstanceStr="%02d" % nodeInstance + return Cluster.pgrepEosServerPattern(nodeInstanceStr) + + # Populates list of EosInstanceInfo objects, matched to actual running instances + def discoverLocalNodes(self, totalNodes, timeout=None): + nodes=[] + + psOut=Cluster.pgrepEosServers(timeout) if psOut is None: Utils.Print("ERROR: No nodes discovered.") return nodes @@ -1207,7 +1218,7 @@ def myFunc(): psOutDisplay=psOut[:6660]+"..." if Utils.Debug: Utils.Print("pgrep output: \"%s\"" % psOutDisplay) for i in range(0, totalNodes): - pattern=r"[\n]?(\d+) (.* --data-dir var/lib/node_%02d .*)\n" % (i) + pattern=Cluster.pgrepEosServerPattern(i) m=re.search(pattern, psOut, re.MULTILINE) if m is None: Utils.Print("ERROR: Failed to find %s pid. 
Pattern %s" % (Utils.EosServerName, pattern)) From 21b6b4f94fd4904feea625b108bd569f0f3c394e Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 4 Sep 2018 16:07:05 -0500 Subject: [PATCH 109/194] Fix to bridge shape code in launch method. GH #4973 --- tests/Cluster.py | 78 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 52 insertions(+), 26 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index cd7c48fea7a..8d41f5f17a2 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -166,17 +166,11 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne cmdArr.append("--specific-nodeos") cmdArr.append(arg) - shapeFilePrefix="shape_bridge" - shapeFile=shapeFilePrefix+".json" - shapeHostsFile=shapeFilePrefix+"_hosts.json" # must be last cmdArr.append before subprocess.call, so that everything is on the command line # before constructing the shape.json file for "bridge" if topo=="bridge": - numProducers=totalProducers if totalProducers is not None else totalNodes - maxProducers=ord('z')-ord('a')+1 - assert numProducers Date: Tue, 4 Sep 2018 16:41:46 -0500 Subject: [PATCH 110/194] Changes to use 2 producer nodes and verification changes. 
GH #4973 --- tests/nodeos_forked_chain_test.py | 207 ++++++++++++++++++++++++++---- 1 file changed, 182 insertions(+), 25 deletions(-) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index c01a0b82e43..effe0c8a356 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -5,6 +5,7 @@ import time from Cluster import Cluster from WalletMgr import WalletMgr +from Node import BlockType from Node import Node from TestHelper import AppArgs from TestHelper import TestHelper @@ -12,6 +13,7 @@ import decimal import math import re +import signal ############################################################### # nodeos_voting_test @@ -24,9 +26,11 @@ args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--p2p-plugin"}) Utils.Debug=args.v -totalProducerNodes=21 +totalProducerNodes=2 totalNonProducerNodes=1 totalNodes=totalProducerNodes+totalNonProducerNodes +maxActiveProducers=21 +totalProducers=maxActiveProducers cluster=Cluster(walletd=True) dumpErrorDetails=args.dump_error_details keepLogs=args.keep_logs @@ -53,7 +57,7 @@ # producer nodes will be mapped to 0 through totalProducerNodes-1, so totalProducerNodes will be the non-producing node specificExtraNodeosArgs[totalProducerNodes]="--plugin eosio::test_control_api_plugin" if cluster.launch(prodCount=prodCount, onlyBios=False, dontKill=dontKill, topo="bridge", pnodes=totalProducerNodes, - totalNodes=totalNodes, totalProducers=totalProducerNodes, p2pPlugin=p2pPlugin, + totalNodes=totalNodes, totalProducers=totalProducers, p2pPlugin=p2pPlugin, specificExtraNodeosArgs=specificExtraNodeosArgs) is False: Utils.cmdError("launcher") Utils.errorExit("Failed to stand up eos cluster.") @@ -89,25 +93,33 @@ Print("Wallet \"%s\" password=%s." 
% (testWalletName, testWallet.password.encode("utf-8"))) + def printNode(node, msg=None): + if msg is None: + msg="" + else: + msg+=" " + Utils.Print("%sNode host=%s, port=%s, pid=%s" % (msg, node.host, node.port, node.pid)) + nonProdNode=None prodNodes=[] - producers={} + producers=[] for i in range(0, totalNodes): node=cluster.getNode(i) node.producers=Cluster.parseProducers(i) numProducers=len(node.producers) - if numProducers==1: - prod=node.producers[0] - trans=node.regproducer(cluster.defProducerAccounts[prod], "http::/mysite.com", 0, waitForTransBlock=False, exitOnError=True) - prodNodes.append(node) - producers[prod]=node - elif numProducers==0: + Utils.Print("node has producers=%s" % (node.producers)) + if numProducers==0: if nonProdNode is None: nonProdNode=node + nonProdNode.nodeNum=i else: Utils.errorExit("More than one non-producing nodes") else: - Utils.errorExit("Producing node should have 1 producer, it has %d" % (numProducers)) + for prod in node.producers: + trans=node.regproducer(cluster.defProducerAccounts[prod], "http::/mysite.com", 0, waitForTransBlock=False, exitOnError=True) + + prodNodes.append(node) + producers.extend(node.producers) node=prodNodes[0] # create accounts via eosio as otherwise a bid is needed @@ -123,24 +135,26 @@ cluster.waitOnClusterSync(blockAdvancing=5) index=0 for account in accounts: - trans=prodNodes[index].vote(account, producers) + Utils.Print("vote for producers=%s" % (producers)) + trans=prodNodes[index % len(prodNodes)].vote(account, producers) index+=1 #verify nodes are in sync and advancing cluster.waitOnClusterSync(blockAdvancing=5) blockNum=node.getNextCleanProductionCycle(trans) - blockProducer=node.getBlockProducer(blockNum) + blockProducer=node.getBlockProducerByNum(blockNum) Utils.Print("Validating blockNum=%s, producer=%s" % (blockNum, blockProducer)) + cluster.biosNode.kill(signal.SIGTERM) lastBlockProducer=blockProducer while blockProducer==lastBlockProducer: blockNum+=1 - 
blockProducer=node.getBlockProducer(blockNum) + blockProducer=node.getBlockProducerByNum(blockNum) productionCycle=[] producerToSlot={} slot=-1 - expectedCount=12 + inRowCountPerProducer=12 while True: if blockProducer not in producers: Utils.errorExit("Producer %s was not one of the voted on producers" % blockProducer) @@ -155,10 +169,10 @@ while blockProducer==lastBlockProducer: producerToSlot[blockProducer]["count"]+=1 blockNum+=1 - blockProducer=node.getBlockProducer(blockNum) + blockProducer=node.getBlockProducerByNum(blockNum) - if producerToSlot[lastBlockProducer]["count"]!=expectedCount: - Utils.errorExit("Producer %s, in slot %d, expected to produce %d blocks but produced %d blocks" % (blockProducer, expectedCount, producerToSlot[lastBlockProducer]["count"])) + if producerToSlot[lastBlockProducer]["count"]!=inRowCountPerProducer: + Utils.errorExit("Producer %s, in slot %d, expected to produce %d blocks but produced %d blocks" % (blockProducer, inRowCountPerProducer, producerToSlot[lastBlockProducer]["count"])) if blockProducer==productionCycle[0]: break @@ -178,19 +192,162 @@ cluster.reportStatus() Utils.Print("Sending command to kill \"bridge\" node to separate the 2 producer groups.") - nonProdNode.killNodeOnProducer(producer="defproducerl", whereInSequence=0) - - for prodNode in prodNodes: - prodNode.getInfo() - - cluster.reportStatus() + # block number to start expecting node killed after + preKillBlockNum=nonProdNode.getBlockNum() + preKillBlockProducer=nonProdNode.getBlockProducerByNum(preKillBlockNum) + # kill at last block before defproducerl, since the block it is killed on will get propagated + killAtProducer="defproducerk" + nonProdNode.killNodeOnProducer(producer=killAtProducer, whereInSequence=(inRowCountPerProducer-1)) + + # will search full cycle after the current block, since we don't know how many blocks were produced since retrieving + # block number and issuing kill command + postKillBlockNum=prodNodes[1].getBlockNum() + 
blockProducers0=[] + blockProducers1=[] + libs0=[] + libs1=[] + lastBlockNum=max([preKillBlockNum,postKillBlockNum])+maxActiveProducers*inRowCountPerProducer + Utils.Print("preKillBlockNum=%s, postKillBlockNum=%s, lastBlockNum=%d" % (preKillBlockNum,postKillBlockNum,lastBlockNum)) + actualLastBlockNum=None + prodChanged=False + nextProdChange=False + info0=prodNodes[0].getInfo(exitOnError=True) + info1=prodNodes[1].getInfo(exitOnError=True) + headBlockNum=min(int(info0["head_block_num"]),int(info1["head_block_num"])) + libNum=min(int(info0["last_irreversible_block_num"]), int(info1["last_irreversible_block_num"])) + for blockNum in range(preKillBlockNum,lastBlockNum): + if blockNum>headBlockNum: + info0=prodNodes[0].getInfo(exitOnError=True) + info1=prodNodes[1].getInfo(exitOnError=True) + headBlockNum=min(int(info0["head_block_num"]),int(info1["head_block_num"])) + libNum=min(int(info0["last_irreversible_block_num"]), int(info1["last_irreversible_block_num"])) + + blockProducer0=prodNodes[0].getBlockProducerByNum(blockNum) + blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum) + blockProducers0.append({"blockNum":blockNum, "prod":blockProducer0}) + blockProducers1.append({"blockNum":blockNum, "prod":blockProducer1}) + #Utils.Print("blockProducer0=%s, blockProducer1=%s, blockNum=%d" % (blockProducer0,blockProducer1,blockNum)) + # ensure that we wait for the next instance of killAtProducer + if not prodChanged: + if preKillBlockProducer!=blockProducer0: + prodChanged=True + if not nextProdChange and prodChanged and blockProducer1==killAtProducer: + nextProdChange=True + elif nextProdChange and blockProducer1!=killAtProducer: + actualLastBlockNum=blockNum + break + if blockProducer0!=blockProducer1: + Utils.errorExit("Groups reported different block producers for block number %d. %s != %s." 
% (blockNum,blockProducer0,blockProducer1)) + + def analyzeBPs(bps0, bps1, expectDivergence): + start=0 + index=None + length=len(bps0) + firstDivergence=None + printInfo=False + while start < length: + bpsStr=None + for i in range(start,length): + bp0=bps0[i] + bp1=bps1[i] + if bpsStr is None: + bpsStr="" + else: + bpsStr+=", " + blockNum0=bp0["blockNum"] + prod0=bp0["prod"] + blockNum1=bp1["blockNum"] + prod1=bp1["prod"] + numDiff=True if blockNum0!=blockNum1 else False + prodDiff=True if prod0!=prod1 else False + if numDiff or prodDiff: + index=i + if firstDivergence is None: + firstDivergence=min(blockNum0, blockNum1) + if not expectDivergence: + printInfo=True + break + bpsStr+=str(blockNum0)+"->"+prod0 + + if index is None: + return + + bpsStr0=None + bpsStr2=None + start=length + for i in range(index,length): + if bpsStr0 is None: + bpsStr0="" + bpsStr1="" + else: + bpsStr0+=", " + bpsStr1+=", " + bp0=bps0[i] + bp1=bps1[i] + blockNum0=bp0["blockNum"] + prod0=bp0["prod"] + blockNum1=bp1["blockNum"] + prod1=bp1["prod"] + numDiff="*" if blockNum0!=blockNum1 else "" + prodDiff="*" if prod0!=prod1 else "" + if not numDiff and not prodDiff: + start=i + index=None + break + bpsStr0+=str(blockNum0)+numDiff+"->"+prod0+prodDiff + bpsStr1+=str(blockNum1)+numDiff+"->"+prod1+prodDiff + if printInfo: + Utils.Print("ERROR: Analyzing Block Producers, did not expect nodes to indicate different block producers for the same blocks.") + Utils.Print("Matchinb Blocks= %s" % (bpsStr)) + Utils.Print("Diverging branch node0= %s" % (bpsStr0)) + Utils.Print("Diverging branch node1= %s" % (bpsStr1)) + return firstDivergence + + firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=True) + # Nodes should not have diverged till the last block + Utils.Print("firstDivergence=%s, blockNum=%s" % (firstDivergence, blockNum)) + assert(firstDivergence==blockNum) + blockProducers0=[] + blockProducers1=[] - time.sleep(60) assert(not nonProdNode.verifyAlive()) for prodNode 
in prodNodes: prodNode.getInfo() - cluster.reportStatus() + killBlockNum=blockNum +# lastBlockNum=killBlockNum+(maxActiveProducers - 1)*inRowCountPerProducer+1 # allow 1st testnet group to produce just 1 more block than the 2nd + lastBlockNum=prodNodes[1].getBlockNum()+(maxActiveProducers - 1)*inRowCountPerProducer+1 # allow 1st testnet group to produce just 1 more block than the 2nd + for blockNum in range(killBlockNum,lastBlockNum): + blockProducer0=prodNodes[0].getBlockProducerByNum(blockNum) + blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum) + blockProducers0.append({"blockNum":blockNum, "prod":blockProducer0}) + blockProducers1.append({"blockNum":blockNum, "prod":blockProducer1}) + + info0=prodNodes[0].getInfo(blockNum) + info1=prodNodes[1].getInfo(blockNum) + Utils.Print("info0=%s\n\ninfo1=%s\n\n" % (info0, info1)) + firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=True) + assert(firstDivergence==killBlockNum) + blockProducers0=[] + blockProducers1=[] + + if not nonProdNode.relaunch(nonProdNode.nodeNum, None): + errorExit("Failure - (non-production) node %d should have restarted" % (nonProdNode.nodeNum)) + + # ensure all blocks from the lib before divergence till the current head are now in consensus + endBlockNum=max(prodNodes[0].getBlockNum(), prodNodes[1].getBlockNum()) + + for blockNum in range(libNum,endBlockNum): + blockProducer0=prodNodes[0].getBlockProducerByNum(blockNum) + blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum) + blockProducers0.append({"blockNum":blockNum, "prod":blockProducer0}) + blockProducers1.append({"blockNum":blockNum, "prod":blockProducer1}) + + + firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=False) + assert(firstDivergence==None) + blockProducers0=[] + blockProducers1=[] testSuccessful=True finally: From 2590ea9f56afbe53e61466379b6858ed4651a578 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 6 Sep 2018 11:27:53 -0500 Subject: [PATCH 111/194] 
Fixed forked integration test for merged bios boot file changes. GH #4973 --- tests/nodeos_forked_chain_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index effe0c8a356..337095ca523 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -58,7 +58,7 @@ specificExtraNodeosArgs[totalProducerNodes]="--plugin eosio::test_control_api_plugin" if cluster.launch(prodCount=prodCount, onlyBios=False, dontKill=dontKill, topo="bridge", pnodes=totalProducerNodes, totalNodes=totalNodes, totalProducers=totalProducers, p2pPlugin=p2pPlugin, - specificExtraNodeosArgs=specificExtraNodeosArgs) is False: + useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs) is False: Utils.cmdError("launcher") Utils.errorExit("Failed to stand up eos cluster.") From 1a0eba2070bc30a273900fbac20a67ae44cdcbce Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 6 Sep 2018 11:28:24 -0500 Subject: [PATCH 112/194] Cleaned up test code. 
GH #4973 --- .../test_control_api_plugin.cpp | 33 +------------------ 1 file changed, 1 insertion(+), 32 deletions(-) diff --git a/plugins/test_control_api_plugin/test_control_api_plugin.cpp b/plugins/test_control_api_plugin/test_control_api_plugin.cpp index 8bca87b2167..91d5535c796 100644 --- a/plugins/test_control_api_plugin/test_control_api_plugin.cpp +++ b/plugins/test_control_api_plugin/test_control_api_plugin.cpp @@ -38,7 +38,6 @@ struct async_result_visitor : public fc::visitor { #define CALL(api_name, api_handle, api_namespace, call_name, http_response_code) \ {std::string("/v1/" #api_name "/" #call_name), \ [this, api_handle](string, string body, url_response_callback cb) mutable { \ - wlog("test_control_api_plugin CALL"); \ try { \ if (body.empty()) body = "{}"; \ auto result = api_handle.call_name(fc::json::from_string(body).as()); \ @@ -50,42 +49,12 @@ struct async_result_visitor : public fc::visitor { #define TEST_CONTROL_RW_CALL(call_name, http_response_code) CALL(test_control, rw_api, test_control_apis::read_write, call_name, http_response_code) -/* -#define CALL(api_name, api_handle, api_namespace, call_name, http_response_code) \ -{std::string("/v1/" #api_name "/" #call_name), \ - [this, api_handle](string, string body, url_response_callback cb) mutable { \ - try { \ - wlog("test_control_api_plugin CALL"); \ - wlog("test_control_api_plugin CALL body=${body}",("body",body)); \ - if (body.empty()) body = "{}"; \ - auto result = api_handle.call_name(fc::json::from_string(body).as()); \ - cb(http_response_code, fc::json::to_string(result)); \ - } catch (...) 
{ \ - http_plugin::handle_exception(#api_name, #call_name, body, cb); \ - } \ - }} - -#define TEST_CONTROL_RW_CALL(call_name, http_response_code) CALL(test_control, rw_api, test_control_apis::read_write, call_name, http_response_code) -*/ void test_control_api_plugin::plugin_startup() { - ilog( "starting test_control_api_plugin" ); -// auto& chain = app().get_plugin().chain(); my.reset(new test_control_api_plugin_impl(app().get_plugin().chain())); auto rw_api = app().get_plugin().get_read_write_api(); app().get_plugin().add_api({ -// TEST_CONTROL_RW_CALL(kill_node_on_producer, 202) - {std::string("/v1/test_control/kill_node_on_producer"), - [this, rw_api](string, string body, url_response_callback cb) mutable { - wlog("test_control_api_plugin CALL"); - try { - if (body.empty()) body = "{}"; - auto result = rw_api.kill_node_on_producer(fc::json::from_string(body).as()); - cb(202, fc::json::to_string(result)); - } catch (...) { - http_plugin::handle_exception("rw_api", "kill_node_on_producer", body, cb); - } - }} + TEST_CONTROL_RW_CALL(kill_node_on_producer, 202) }); } From f101c934c5e96ac91d12355da682912f9635a1f4 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 6 Sep 2018 12:31:42 -0500 Subject: [PATCH 113/194] Cleanup logging statements. 
GH #4973 --- .../test_control_plugin.cpp | 7 ---- tests/TestHelper.py | 2 -- tests/nodeos_forked_chain_test.py | 33 +++++++------------ 3 files changed, 11 insertions(+), 31 deletions(-) diff --git a/plugins/test_control_plugin/test_control_plugin.cpp b/plugins/test_control_plugin/test_control_plugin.cpp index 7b63acac1f0..0232b1dcf6a 100644 --- a/plugins/test_control_plugin/test_control_plugin.cpp +++ b/plugins/test_control_plugin/test_control_plugin.cpp @@ -78,13 +78,11 @@ void test_control_plugin_impl::process_next_block_state(const chain::block_heade // start counting sequences for this producer (once we if (producer_name == _producer && _clean_producer_sequence) { _producer_sequence += 1; - wlog("test_control_plugin_impl::process_next_block_state() seq=${seq}",("seq",_producer_sequence)); if (_producer_sequence >= _where_in_sequence) { app().quit(); } } else if (producer_name != _producer) { - wlog("test_control_plugin_impl::process_next_block_state() reset"); _producer_sequence = -1; // can now guarantee we are at the start of the producer _clean_producer_sequence = true; @@ -130,16 +128,11 @@ void test_control_plugin::plugin_shutdown() { namespace test_control_apis { read_write::kill_node_on_producer_results read_write::kill_node_on_producer(const read_write::kill_node_on_producer_params& params) const { - wlog("test_control_plugin::kill_node_on_producer() ${prod} ${where_in_seq} ${head_lib}",("prod",params.producer.to_string())("where_in_seq",params.where_in_sequence)("head_lib", (params.based_on_lib ? 
"LIB" : "HEAD"))); if (params.based_on_lib) { - wlog("test_control_plugin::kill_node_on_producer() kill_on_lib"); my->kill_on_lib(params.producer, params.where_in_sequence); - wlog("test_control_plugin::kill_node_on_producer() kill_on_lib done"); } else { - wlog("test_control_plugin::kill_node_on_producer() kill_on_head"); my->kill_on_head(params.producer, params.where_in_sequence); - wlog("test_control_plugin::kill_node_on_producer() kill_on_head done"); } return read_write::kill_node_on_producer_results{}; } diff --git a/tests/TestHelper.py b/tests/TestHelper.py index a1d1fd1b6eb..4cec2b02a03 100644 --- a/tests/TestHelper.py +++ b/tests/TestHelper.py @@ -21,7 +21,6 @@ def __init__(self, flag, type, help, default, choices=None): def add(self, flag, type, help, default, choices=None): arg=self.AppArg(flag, type, help, default, choices) self.args.append(arg) - Utils.Print("args %d" % (len(self.args))) # pylint: disable=too-many-instance-attributes class TestHelper(object): @@ -36,7 +35,6 @@ def parse_args(includeArgs, applicationSpecificArgs=AppArgs()): assert(includeArgs) assert(isinstance(includeArgs, set)) assert(isinstance(applicationSpecificArgs, AppArgs)) - Utils.Print("applicationSpecificArgs %d" % (len(applicationSpecificArgs.args))) parser = argparse.ArgumentParser(add_help=False) parser.add_argument('-?', action='help', default=argparse.SUPPRESS, diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 337095ca523..51d10c5b10d 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -93,13 +93,6 @@ Print("Wallet \"%s\" password=%s." 
% (testWalletName, testWallet.password.encode("utf-8"))) - def printNode(node, msg=None): - if msg is None: - msg="" - else: - msg+=" " - Utils.Print("%sNode host=%s, port=%s, pid=%s" % (msg, node.host, node.port, node.pid)) - nonProdNode=None prodNodes=[] producers=[] @@ -107,7 +100,7 @@ def printNode(node, msg=None): node=cluster.getNode(i) node.producers=Cluster.parseProducers(i) numProducers=len(node.producers) - Utils.Print("node has producers=%s" % (node.producers)) + Print("node has producers=%s" % (node.producers)) if numProducers==0: if nonProdNode is None: nonProdNode=node @@ -132,18 +125,18 @@ def printNode(node, msg=None): trans=node.delegatebw(account, 20000000.0000, 20000000.0000, exitOnError=True) #verify nodes are in sync and advancing - cluster.waitOnClusterSync(blockAdvancing=5) + cluster.waitOnClusterSync(blockAdvancing=5) index=0 for account in accounts: - Utils.Print("vote for producers=%s" % (producers)) + Print("Vote for producers=%s" % (producers)) trans=prodNodes[index % len(prodNodes)].vote(account, producers) index+=1 #verify nodes are in sync and advancing - cluster.waitOnClusterSync(blockAdvancing=5) + cluster.waitOnClusterSync(blockAdvancing=5) blockNum=node.getNextCleanProductionCycle(trans) blockProducer=node.getBlockProducerByNum(blockNum) - Utils.Print("Validating blockNum=%s, producer=%s" % (blockNum, blockProducer)) + Print("Validating blockNum=%s, producer=%s" % (blockNum, blockProducer)) cluster.biosNode.kill(signal.SIGTERM) lastBlockProducer=blockProducer @@ -184,14 +177,14 @@ def printNode(node, msg=None): else: output+=", " output+=blockProducer+":"+str(producerToSlot[blockProducer]["count"]) - Utils.Print("ProductionCycle ->> {\n%s\n}" % output) + Print("ProductionCycle ->> {\n%s\n}" % output) for prodNode in prodNodes: prodNode.getInfo() cluster.reportStatus() - Utils.Print("Sending command to kill \"bridge\" node to separate the 2 producer groups.") + Print("Sending command to kill \"bridge\" node to separate the 2 
producer groups.") # block number to start expecting node killed after preKillBlockNum=nonProdNode.getBlockNum() preKillBlockProducer=nonProdNode.getBlockProducerByNum(preKillBlockNum) @@ -207,7 +200,6 @@ def printNode(node, msg=None): libs0=[] libs1=[] lastBlockNum=max([preKillBlockNum,postKillBlockNum])+maxActiveProducers*inRowCountPerProducer - Utils.Print("preKillBlockNum=%s, postKillBlockNum=%s, lastBlockNum=%d" % (preKillBlockNum,postKillBlockNum,lastBlockNum)) actualLastBlockNum=None prodChanged=False nextProdChange=False @@ -226,7 +218,6 @@ def printNode(node, msg=None): blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum) blockProducers0.append({"blockNum":blockNum, "prod":blockProducer0}) blockProducers1.append({"blockNum":blockNum, "prod":blockProducer1}) - #Utils.Print("blockProducer0=%s, blockProducer1=%s, blockNum=%d" % (blockProducer0,blockProducer1,blockNum)) # ensure that we wait for the next instance of killAtProducer if not prodChanged: if preKillBlockProducer!=blockProducer0: @@ -297,15 +288,14 @@ def analyzeBPs(bps0, bps1, expectDivergence): bpsStr0+=str(blockNum0)+numDiff+"->"+prod0+prodDiff bpsStr1+=str(blockNum1)+numDiff+"->"+prod1+prodDiff if printInfo: - Utils.Print("ERROR: Analyzing Block Producers, did not expect nodes to indicate different block producers for the same blocks.") - Utils.Print("Matchinb Blocks= %s" % (bpsStr)) - Utils.Print("Diverging branch node0= %s" % (bpsStr0)) - Utils.Print("Diverging branch node1= %s" % (bpsStr1)) + Print("ERROR: Analyzing Block Producers, did not expect nodes to indicate different block producers for the same blocks.") + Print("Matching Blocks= %s" % (bpsStr)) + Print("Diverging branch node0= %s" % (bpsStr0)) + Print("Diverging branch node1= %s" % (bpsStr1)) return firstDivergence firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=True) # Nodes should not have diverged till the last block - Utils.Print("firstDivergence=%s, blockNum=%s" % (firstDivergence, 
blockNum)) assert(firstDivergence==blockNum) blockProducers0=[] blockProducers1=[] @@ -325,7 +315,6 @@ def analyzeBPs(bps0, bps1, expectDivergence): info0=prodNodes[0].getInfo(blockNum) info1=prodNodes[1].getInfo(blockNum) - Utils.Print("info0=%s\n\ninfo1=%s\n\n" % (info0, info1)) firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=True) assert(firstDivergence==killBlockNum) blockProducers0=[] From f80796cf2821ce48af2b287f2af0d0acd537dae8 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 10 Sep 2018 20:28:57 -0500 Subject: [PATCH 114/194] Fixes for Pull Request comments from Kevin H. GH #4973 --- tests/nodeos_forked_chain_test.py | 236 +++++++++++++++++++----------- 1 file changed, 150 insertions(+), 86 deletions(-) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 51d10c5b10d..5a9a1152ee3 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -16,7 +16,7 @@ import signal ############################################################### -# nodeos_voting_test +# nodeos_forked_chain_test # --dump-error-details # --keep-logs ############################################################### @@ -24,6 +24,84 @@ from core_symbol import CORE_SYMBOL +def analyzeBPs(bps0, bps1, expectDivergence): + start=0 + index=None + length=len(bps0) + firstDivergence=None + errorInDivergence=False + while start < length: + bpsStr=None + for i in range(start,length): + bp0=bps0[i] + bp1=bps1[i] + if bpsStr is None: + bpsStr="" + else: + bpsStr+=", " + blockNum0=bp0["blockNum"] + prod0=bp0["prod"] + blockNum1=bp1["blockNum"] + prod1=bp1["prod"] + numDiff=True if blockNum0!=blockNum1 else False + prodDiff=True if prod0!=prod1 else False + if numDiff or prodDiff: + index=i + if firstDivergence is None: + firstDivergence=min(blockNum0, blockNum1) + if not expectDivergence: + errorInDivergence=True + break + bpsStr+=str(blockNum0)+"->"+prod0 + + if index is None: + return + + bpsStr0=None + 
bpsStr2=None + start=length + for i in range(index,length): + if bpsStr0 is None: + bpsStr0="" + bpsStr1="" + else: + bpsStr0+=", " + bpsStr1+=", " + bp0=bps0[i] + bp1=bps1[i] + blockNum0=bp0["blockNum"] + prod0=bp0["prod"] + blockNum1=bp1["blockNum"] + prod1=bp1["prod"] + numDiff="*" if blockNum0!=blockNum1 else "" + prodDiff="*" if prod0!=prod1 else "" + if not numDiff and not prodDiff: + start=i + index=None + if expectDivergence: + errorInDivergence=True + break + bpsStr0+=str(blockNum0)+numDiff+"->"+prod0+prodDiff + bpsStr1+=str(blockNum1)+numDiff+"->"+prod1+prodDiff + if errorInDivergence: + msg="Failed analyzing block producers - " + if expectDivergence: + msg+="nodes indicate different block producers for the same blocks, but did not expect them to diverge." + else: + msg+="did not expect nodes to indicate different block producers for the same blocks." + msg+="\n Matching Blocks= %s \n Diverging branch node0= %s \n Diverging branch node1= %s" % (bpsStr,bpsStr0,bpsStr1) + Utils.errorExit(msg) + return firstDivergence + +def getMinHeadAndLib(prodNodes): + info0=prodNodes[0].getInfo(exitOnError=True) + info1=prodNodes[1].getInfo(exitOnError=True) + headBlockNum=min(int(info0["head_block_num"]),int(info1["head_block_num"])) + libNum=min(int(info0["last_irreversible_block_num"]), int(info1["last_irreversible_block_num"])) + return (headBlockNum, libNum) + + + args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--p2p-plugin"}) Utils.Debug=args.v totalProducerNodes=2 @@ -54,20 +132,26 @@ cluster.cleanup() Print("Stand up cluster") specificExtraNodeosArgs={} - # producer nodes will be mapped to 0 through totalProducerNodes-1, so totalProducerNodes will be the non-producing node + # producer nodes will be mapped to 0 through totalProducerNodes-1, so the number totalProducerNodes will be the non-producing node specificExtraNodeosArgs[totalProducerNodes]="--plugin eosio::test_control_api_plugin" + + 
+ # *** setup topogrophy *** + + # "bridge" shape connects defprocera through defproducerk (in node0) to each other and defproducerl through defproduceru (in node01) + # and the only connection between those 2 groups is through the bridge node + if cluster.launch(prodCount=prodCount, onlyBios=False, dontKill=dontKill, topo="bridge", pnodes=totalProducerNodes, totalNodes=totalNodes, totalProducers=totalProducers, p2pPlugin=p2pPlugin, useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs) is False: Utils.cmdError("launcher") Utils.errorExit("Failed to stand up eos cluster.") - - # "bridge" shape connects defprocera through defproducerk to each other and defproducerl through defproduceru and the only - # connection between those 2 groups is through the bridge node - Print("Validating system accounts after bootstrap") cluster.validateAccounts(None) + + # *** create accounts to vote in desired producers *** + accounts=cluster.createAccountKeys(5) if accounts is None: Utils.errorExit("FAILURE - create keys") @@ -93,6 +177,9 @@ Print("Wallet \"%s\" password=%s." 
% (testWalletName, testWallet.password.encode("utf-8"))) + + # *** identify each node (producers and non-producing node) *** + nonProdNode=None prodNodes=[] producers=[] @@ -114,6 +201,9 @@ prodNodes.append(node) producers.extend(node.producers) + + # *** delegate bandwidth to accounts *** + node=prodNodes[0] # create accounts via eosio as otherwise a bid is needed for account in accounts: @@ -124,6 +214,9 @@ node.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer") trans=node.delegatebw(account, 20000000.0000, 20000000.0000, exitOnError=True) + + # *** vote using accounts *** + #verify nodes are in sync and advancing cluster.waitOnClusterSync(blockAdvancing=5) index=0 @@ -132,6 +225,9 @@ trans=prodNodes[index % len(prodNodes)].vote(account, producers) index+=1 + + # *** Identify a block where production is stable *** + #verify nodes are in sync and advancing cluster.waitOnClusterSync(blockAdvancing=5) blockNum=node.getNextCleanProductionCycle(trans) @@ -139,11 +235,15 @@ Print("Validating blockNum=%s, producer=%s" % (blockNum, blockProducer)) cluster.biosNode.kill(signal.SIGTERM) + #advance to the next block of 12 lastBlockProducer=blockProducer while blockProducer==lastBlockProducer: blockNum+=1 blockProducer=node.getBlockProducerByNum(blockNum) + + # *** Identify what the production cycel is *** + productionCycle=[] producerToSlot={} slot=-1 @@ -179,11 +279,14 @@ output+=blockProducer+":"+str(producerToSlot[blockProducer]["count"]) Print("ProductionCycle ->> {\n%s\n}" % output) - for prodNode in prodNodes: - prodNode.getInfo() - + #retrieve the info for all the nodes to report the status for each + for node in cluster.getNodes(): + node.getInfo() cluster.reportStatus() + + # *** Killing the "bridge" node *** + Print("Sending command to kill \"bridge\" node to separate the 2 producer groups.") # block number to start expecting node killed after preKillBlockNum=nonProdNode.getBlockNum() @@ -192,6 +295,9 @@ killAtProducer="defproducerk" 
nonProdNode.killNodeOnProducer(producer=killAtProducer, whereInSequence=(inRowCountPerProducer-1)) + + # *** Identify a highest block number to check while we are trying to identify where the divergence will occur *** + # will search full cycle after the current block, since we don't know how many blocks were produced since retrieving # block number and issuing kill command postKillBlockNum=prodNodes[1].getBlockNum() @@ -203,96 +309,38 @@ actualLastBlockNum=None prodChanged=False nextProdChange=False - info0=prodNodes[0].getInfo(exitOnError=True) - info1=prodNodes[1].getInfo(exitOnError=True) - headBlockNum=min(int(info0["head_block_num"]),int(info1["head_block_num"])) - libNum=min(int(info0["last_irreversible_block_num"]), int(info1["last_irreversible_block_num"])) + #identify the earliest LIB to start identify the earliest block to check if divergent branches eventually reach concensus + (headBlockNum, libNumAroundDivergence)=getMinHeadAndLib(prodNodes) for blockNum in range(preKillBlockNum,lastBlockNum): + #avoiding getting LIB until my current block passes the head from the last time I checked if blockNum>headBlockNum: - info0=prodNodes[0].getInfo(exitOnError=True) - info1=prodNodes[1].getInfo(exitOnError=True) - headBlockNum=min(int(info0["head_block_num"]),int(info1["head_block_num"])) - libNum=min(int(info0["last_irreversible_block_num"]), int(info1["last_irreversible_block_num"])) + (headBlockNum, libNumAroundDivergence)=getMinHeadAndLib(prodNodes) + # track the block number and producer from each producing node blockProducer0=prodNodes[0].getBlockProducerByNum(blockNum) blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum) blockProducers0.append({"blockNum":blockNum, "prod":blockProducer0}) blockProducers1.append({"blockNum":blockNum, "prod":blockProducer1}) - # ensure that we wait for the next instance of killAtProducer + + #in the case that the preKillBlockNum was also produced by killAtProducer, ensure that we have + #at least one producer 
transition before checking for killAtProducer if not prodChanged: if preKillBlockProducer!=blockProducer0: prodChanged=True + + #since it is killing for the last block of killAtProducer, we look for the next producer change if not nextProdChange and prodChanged and blockProducer1==killAtProducer: nextProdChange=True elif nextProdChange and blockProducer1!=killAtProducer: actualLastBlockNum=blockNum break + + #if we diverge before identifying the actualLastBlockNum, then there is an ERROR if blockProducer0!=blockProducer1: Utils.errorExit("Groups reported different block producers for block number %d. %s != %s." % (blockNum,blockProducer0,blockProducer1)) - def analyzeBPs(bps0, bps1, expectDivergence): - start=0 - index=None - length=len(bps0) - firstDivergence=None - printInfo=False - while start < length: - bpsStr=None - for i in range(start,length): - bp0=bps0[i] - bp1=bps1[i] - if bpsStr is None: - bpsStr="" - else: - bpsStr+=", " - blockNum0=bp0["blockNum"] - prod0=bp0["prod"] - blockNum1=bp1["blockNum"] - prod1=bp1["prod"] - numDiff=True if blockNum0!=blockNum1 else False - prodDiff=True if prod0!=prod1 else False - if numDiff or prodDiff: - index=i - if firstDivergence is None: - firstDivergence=min(blockNum0, blockNum1) - if not expectDivergence: - printInfo=True - break - bpsStr+=str(blockNum0)+"->"+prod0 - - if index is None: - return - - bpsStr0=None - bpsStr2=None - start=length - for i in range(index,length): - if bpsStr0 is None: - bpsStr0="" - bpsStr1="" - else: - bpsStr0+=", " - bpsStr1+=", " - bp0=bps0[i] - bp1=bps1[i] - blockNum0=bp0["blockNum"] - prod0=bp0["prod"] - blockNum1=bp1["blockNum"] - prod1=bp1["prod"] - numDiff="*" if blockNum0!=blockNum1 else "" - prodDiff="*" if prod0!=prod1 else "" - if not numDiff and not prodDiff: - start=i - index=None - break - bpsStr0+=str(blockNum0)+numDiff+"->"+prod0+prodDiff - bpsStr1+=str(blockNum1)+numDiff+"->"+prod1+prodDiff - if printInfo: - Print("ERROR: Analyzing Block Producers, did not expect nodes to 
indicate different block producers for the same blocks.") - Print("Matching Blocks= %s" % (bpsStr)) - Print("Diverging branch node0= %s" % (bpsStr0)) - Print("Diverging branch node1= %s" % (bpsStr1)) - return firstDivergence + + # *** Analyze the producers leading up to the block after killing the non-producing node *** firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=True) # Nodes should not have diverged till the last block @@ -300,39 +348,55 @@ def analyzeBPs(bps0, bps1, expectDivergence): blockProducers0=[] blockProducers1=[] + #verify that the non producing node is not alive (and populate the producer nodes with current getInfo data to report if + #an error occurs) assert(not nonProdNode.verifyAlive()) for prodNode in prodNodes: prodNode.getInfo() + + # *** Track the blocks from the divergence till there are 10*12 blocks on one chain and 10*12+1 on the other *** + killBlockNum=blockNum -# lastBlockNum=killBlockNum+(maxActiveProducers - 1)*inRowCountPerProducer+1 # allow 1st testnet group to produce just 1 more block than the 2nd - lastBlockNum=prodNodes[1].getBlockNum()+(maxActiveProducers - 1)*inRowCountPerProducer+1 # allow 1st testnet group to produce just 1 more block than the 2nd + lastBlockNum=killBlockNum+(maxActiveProducers - 1)*inRowCountPerProducer+1 # allow 1st testnet group to produce just 1 more block than the 2nd for blockNum in range(killBlockNum,lastBlockNum): blockProducer0=prodNodes[0].getBlockProducerByNum(blockNum) blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum) blockProducers0.append({"blockNum":blockNum, "prod":blockProducer0}) blockProducers1.append({"blockNum":blockNum, "prod":blockProducer1}) - info0=prodNodes[0].getInfo(blockNum) - info1=prodNodes[1].getInfo(blockNum) + + # *** Analyze the producers from the divergence to the lastBlockNum and verify they stay diverged *** + firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=True) assert(firstDivergence==killBlockNum) 
blockProducers0=[] blockProducers1=[] + + # *** Relaunch the non-producing bridge node to connect the producing nodes again *** + if not nonProdNode.relaunch(nonProdNode.nodeNum, None): errorExit("Failure - (non-production) node %d should have restarted" % (nonProdNode.nodeNum)) + + # *** Identify the producers from the saved LIB to the current highest head *** + + #ensure that the nodes have enough time to get in concensus, so wait for 3 producers to produce their complete round + time.sleep(inRowCountPerProducer * 3 / 2) + # ensure all blocks from the lib before divergence till the current head are now in consensus endBlockNum=max(prodNodes[0].getBlockNum(), prodNodes[1].getBlockNum()) - for blockNum in range(libNum,endBlockNum): + for blockNum in range(libNumAroundDivergence,endBlockNum): blockProducer0=prodNodes[0].getBlockProducerByNum(blockNum) blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum) blockProducers0.append({"blockNum":blockNum, "prod":blockProducer0}) blockProducers1.append({"blockNum":blockNum, "prod":blockProducer1}) + # *** Analyze the producers from the saved LIB to the current highest head and verify they match now *** + firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=False) assert(firstDivergence==None) blockProducers0=[] From d46744347bffbc596f49c86d17514481848a0d8a Mon Sep 17 00:00:00 2001 From: Eugene Chung Date: Tue, 11 Sep 2018 10:59:55 +0900 Subject: [PATCH 115/194] remove doxygen warning --- contracts/eosiolib/compiler_builtins.h | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/contracts/eosiolib/compiler_builtins.h b/contracts/eosiolib/compiler_builtins.h index 62e2ff2515e..3e0d9435357 100644 --- a/contracts/eosiolib/compiler_builtins.h +++ b/contracts/eosiolib/compiler_builtins.h @@ -189,7 +189,7 @@ extern "C" { /** * Add two long doubles split as two 64 bit unsigned integers and assign the value to the first parameter. 
* @brief Add two long doubles (which are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. + * @param ret It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -201,7 +201,7 @@ extern "C" { /** * Subtract two long doubles split as two 64 bit unsigned integers and assign the value to the first parameter. * @brief Subtract two long doubles (which are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. + * @param ret It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -213,7 +213,7 @@ extern "C" { /** * Multiply two long doubles split as two 64 bit unsigned integers and assign the value to the first parameter. * @brief Multiply two long doubles (which are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. + * @param ret It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -225,7 +225,7 @@ extern "C" { /** * Divide two long doubles split as two 64 bit unsigned integers and assign the value to the first parameter. * @brief Divide two long doubles (which are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. + * @param ret It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. 
@@ -237,7 +237,6 @@ extern "C" { /** * Check equality between two doubles split as two 64 bit unsigned integers * @brief Check equality between two doubles (which are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -252,7 +251,6 @@ extern "C" { /** * Check inequality between two doubles split as two 64 bit unsigned integers * @brief Check inequality between two doubles (which are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -268,7 +266,6 @@ extern "C" { /** * Check if the first double is greater or equal to the second double, the doubles are split as two 64 bit unsigned integers * @brief Check if the first double is greater or equal to the second double, (the doubles are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -283,7 +280,6 @@ extern "C" { /** * Check if the first double is greater than the second double, the doubles are split as two 64 bit unsigned integers * @brief Check if the first double is greater than the second double, (the doubles are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. 
@@ -298,7 +294,6 @@ extern "C" { /** * Check if the first double is less or equal to the second double, the doubles are split as two 64 bit unsigned integers * @brief Check if the first double is less or equal to the second double, (the doubles are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -313,7 +308,6 @@ extern "C" { /** * Check if the first double is less than the second double, the doubles are split as two 64 bit unsigned integers * @brief Check if the first double is less than the second double, (the doubles are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -328,7 +322,6 @@ extern "C" { /** * Compare two doubles which are split as two 64 bit unsigned integers * @brief Compare two doubles (the doubles are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. @@ -343,7 +336,6 @@ extern "C" { /** * Check if either of the doubles is NaN, the doubles are split as two 64 bit unsigned integers * @brief Check if either of the doubles is NaN, (the doubles are represented as two 64 bit unsigned integers) - * @param res It will be replaced with the result product. * @param la Low 64 bits of the first 128 bit factor. * @param ha High 64 bits of the first 128 bit factor. * @param lb Low 64 bits of the second 128 bit factor. 
From d4bf4c3ff5389991d1326532fffdbbe31356a28e Mon Sep 17 00:00:00 2001 From: Eugene Chung Date: Tue, 11 Sep 2018 11:01:45 +0900 Subject: [PATCH 116/194] remove doxygen warning --- contracts/eosiolib/fixedpoint.hpp | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/contracts/eosiolib/fixedpoint.hpp b/contracts/eosiolib/fixedpoint.hpp index 9f2ced9fd2c..8c36f5ea54b 100644 --- a/contracts/eosiolib/fixedpoint.hpp +++ b/contracts/eosiolib/fixedpoint.hpp @@ -362,7 +362,7 @@ namespace eosio * Assignment operator. Assign fixed_point32 to fixed_point64 * * @brief Assignment operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return fixed_point64& - Reference to this object */ @@ -372,7 +372,7 @@ namespace eosio * Assignment operator. Assign fixed_point64 to fixed_point64 * * @brief Assignment operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return fixed_point64& - Reference to this object */ @@ -426,7 +426,7 @@ namespace eosio * Equality operator * * @brief Equality operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return true - if equal * @return false - otherwise @@ -437,7 +437,7 @@ namespace eosio * Greater than operator * * @brief Greater than operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return true - if equal * @return false - otherwise @@ -448,7 +448,7 @@ namespace eosio * Less than operator * * @brief Less than operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return true - if equal * @return false - otherwise @@ -506,7 +506,7 @@ namespace eosio * Construct a new fixed point32 object from int32_t * * @brief Construct a new fixed point32 object - * @param v - int32_t representation of the fixed point value + * 
@param param - int32_t representation of the fixed point value */ fixed_point32(int32_t param=0) : val(param) {} @@ -553,7 +553,7 @@ namespace eosio * Assignment operator. Assign fixed_point32 to fixed_point32 * * @brief Assignment operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return fixed_point32& - Reference to this object */ @@ -563,7 +563,7 @@ namespace eosio * Assignment operator. Assign fixed_point64 to fixed_point32 * * @brief Assignment operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return fixed_point32& - Reference to this object */ @@ -615,7 +615,7 @@ namespace eosio * Equality operator * * @brief Equality operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return true - if equal * @return false - otherwise @@ -626,7 +626,7 @@ namespace eosio * Greater than operator * * @brief Greater than operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return true - if equal * @return false - otherwise @@ -637,7 +637,7 @@ namespace eosio * Less than operator * * @brief Less than operator - * @tparam qr - Precision of the source + * @tparam QR - Precision of the source * @param r - Source * @return true - if equal * @return false - otherwise From ee52aba0aac342426be0bb9b72220fdea7f49200 Mon Sep 17 00:00:00 2001 From: Jerry <1032246642@qq.com> Date: Tue, 11 Sep 2018 17:32:05 +0800 Subject: [PATCH 117/194] avoid plugin through irreversible_block signal change block, so that apply_block faild. andkeep signal according to the order of accepted_transaction, applied_transaction and irreversible_block. 
--- libraries/chain/controller.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 0c582871a11..2b7562c3e7d 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1026,13 +1026,15 @@ struct controller_impl { bool trust = !conf.force_all_checks && (s == controller::block_status::irreversible || s == controller::block_status::validated); auto new_header_state = fork_db.add( b, trust ); emit( self.accepted_block_header, new_header_state ); - // on replay irreversible is not emitted by fork database, so emit it explicitly here - if( s == controller::block_status::irreversible ) - emit( self.irreversible_block, new_header_state ); if ( read_mode != db_read_mode::IRREVERSIBLE ) { maybe_switch_forks( s ); } + + // on replay irreversible is not emitted by fork database, so emit it explicitly here + if( s == controller::block_status::irreversible ) + emit( self.irreversible_block, new_header_state ); + } FC_LOG_AND_RETHROW( ) } From 026556eb62b0ffe5f9f972a9a3ccca382ea9ed07 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 12 Sep 2018 09:58:17 -0500 Subject: [PATCH 118/194] Insert action_traces after transaction_trace. Fix merge issue. --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 82 +++++++++------------ 1 file changed, 36 insertions(+), 46 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 3255f993e93..45917fa4e26 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -822,46 +822,53 @@ void mongo_db_plugin_impl::_process_applied_transaction( const chain::transactio } } - if( write_atraces ) { - try { - if( !bulk_action_traces.execute() ) { - EOS_ASSERT( false, chain::mongo_db_insert_fail, "Bulk action traces insert failed for transaction trace: ${id}", ("id", t->id)); - } - } catch(...) 
{ - handle_mongo_exception("action traces insert", __LINE__); - } - } - - if( !start_block_reached || !store_transaction_traces ) return; + if( !start_block_reached ) return; //< add_action_trace calls update_account which must be called always if( !write_atraces ) return; //< do not insert transaction_trace if all action_traces filtered out // transaction trace insert - auto v = to_variant_with_abi( *t ); - string json = fc::json::to_string( v ); - try { - const auto& value = bsoncxx::from_json( json ); - trans_traces_doc.append( bsoncxx::builder::concatenate_doc{value.view()} ); - } catch( bsoncxx::exception& ) { + if( store_transaction_traces ) { try { - json = fc::prune_invalid_utf8( json ); - const auto& value = bsoncxx::from_json( json ); - trans_traces_doc.append( bsoncxx::builder::concatenate_doc{value.view()} ); - trans_traces_doc.append( kvp( "non-utf8-purged", b_bool{true} )); - } catch( bsoncxx::exception& e ) { - elog( "Unable to convert transaction JSON to MongoDB JSON: ${e}", ("e", e.what())); - elog( " JSON: ${j}", ("j", json)); + auto v = to_variant_with_abi( *t ); + string json = fc::json::to_string( v ); + try { + const auto& value = bsoncxx::from_json( json ); + trans_traces_doc.append( bsoncxx::builder::concatenate_doc{value.view()} ); + } catch( bsoncxx::exception& ) { + try { + json = fc::prune_invalid_utf8( json ); + const auto& value = bsoncxx::from_json( json ); + trans_traces_doc.append( bsoncxx::builder::concatenate_doc{value.view()} ); + trans_traces_doc.append( kvp( "non-utf8-purged", b_bool{true} ) ); + } catch( bsoncxx::exception& e ) { + elog( "Unable to convert transaction JSON to MongoDB JSON: ${e}", ("e", e.what()) ); + elog( " JSON: ${j}", ("j", json) ); + } + } + trans_traces_doc.append( kvp( "createdAt", b_date{now} ) ); + + try { + if( !_trans_traces.insert_one( trans_traces_doc.view() ) ) { + EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert trans ${id}", ("id", t->id) ); + } + } catch( ... 
) { + handle_mongo_exception( "trans_traces insert: " + json, __LINE__ ); + } + } catch( ... ) { + handle_mongo_exception( "trans_traces serialization: " + t->id.str(), __LINE__ ); } } - trans_traces_doc.append( kvp( "createdAt", b_date{now} )); + // insert action_traces try { - if( !_trans_traces.insert_one( trans_traces_doc.view())) { - EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert trans ${id}", ("id", t->id)); + if( !bulk_action_traces.execute() ) { + EOS_ASSERT( false, chain::mongo_db_insert_fail, + "Bulk action traces insert failed for transaction trace: ${id}", ("id", t->id) ); } - } catch(...) { - handle_mongo_exception("trans_traces insert: " + json, __LINE__); + } catch( ... ) { + handle_mongo_exception( "action traces insert", __LINE__ ); } + } void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr& bs ) { @@ -998,23 +1005,6 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ _block_states.update_one( make_document( kvp( "_id", ir_block->view()["_id"].get_oid() ) ), update_doc.view() ); } - if( store_block_states ) { - auto block_states = mongo_conn[db_name][block_states_col]; - auto ir_block = find_block( block_states, block_id_str ); - if( !ir_block ) { - _process_accepted_block( bs ); - ir_block = find_block( block_states, block_id_str ); - if( !ir_block ) return; // should never happen - } - - auto update_doc = make_document( kvp( "$set", make_document( kvp( "irreversible", b_bool{true} ), - kvp( "validated", b_bool{bs->validated} ), - kvp( "in_current_chain", b_bool{bs->in_current_chain} ), - kvp( "updatedAt", b_date{now} ) ) ) ); - - block_states.update_one( make_document( kvp( "_id", ir_block->view()["_id"].get_oid() ) ), update_doc.view() ); - } - if( store_transactions ) { const auto block_num = bs->block->block_num(); bool transactions_in_block = false; From 0754e85ca39081f61e6229aa07af01a8e11aa595 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 12 Sep 2018 
12:20:52 -0500 Subject: [PATCH 119/194] Added flag to temporarily behave like the validation mode is set to light when a block is produced by a trusted producer. GH #5268 --- libraries/chain/controller.cpp | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 26d54d7da70..dd2341ced20 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1018,14 +1018,20 @@ struct controller_impl { void push_block( const signed_block_ptr& b, controller::block_status s ) { - // idump((fc::json::to_pretty_string(*b))); EOS_ASSERT(!pending, block_validate_exception, "it is not valid to push a block when there is a pending block"); + + auto reset_prod_light_validation = fc::make_scoped_exit([this]() { + conf.trusted_producer_light_validation = false; + }); try { EOS_ASSERT( b, block_validate_exception, "trying to push empty block" ); EOS_ASSERT( s != controller::block_status::incomplete, block_validate_exception, "invalid block status for a completed block" ); emit( self.pre_accepted_block, b ); bool trust = !conf.force_all_checks && (s == controller::block_status::irreversible || s == controller::block_status::validated); auto new_header_state = fork_db.add( b, trust ); + if (conf.trusted_producers.count(b->producer)) { + conf.trusted_producer_light_validation = true; + }; emit( self.accepted_block_header, new_header_state ); // on replay irreversible is not emitted by fork database, so emit it explicitly here if( s == controller::block_status::irreversible ) @@ -1657,13 +1663,14 @@ bool controller::light_validation_allowed(bool replay_opts_disabled_by_policy) c return false; } - auto pb_status = my->pending->_block_status; + const auto pb_status = my->pending->_block_status; // in a pending irreversible or previously validated block and we have forcing all checks - bool consider_skipping_on_replay = (pb_status == block_status::irreversible || pb_status == 
block_status::validated) && !replay_opts_disabled_by_policy; + const bool consider_skipping_on_replay = (pb_status == block_status::irreversible || pb_status == block_status::validated) && !replay_opts_disabled_by_policy; // OR in a signed block and in light validation mode - bool consider_skipping_on_validate = (pb_status == block_status::complete && my->conf.block_validation_mode == validation_mode::LIGHT); + const bool consider_skipping_on_validate = (pb_status == block_status::complete && + (my->conf.block_validation_mode == validation_mode::LIGHT || my->conf.trusted_producer_light_validation)); return consider_skipping_on_replay || consider_skipping_on_validate; } From 5e0bf2b16dfef08d12a24fa7f07b2fb97e781590 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 12 Sep 2018 13:37:44 -0500 Subject: [PATCH 120/194] Add account_delta type and use flat_set so json is a list of objects instead of array of arrays. --- libraries/chain/apply_context.cpp | 13 ++++++++++--- .../chain/include/eosio/chain/apply_context.hpp | 2 +- libraries/chain/include/eosio/chain/trace.hpp | 14 ++++++++++++-- 3 files changed, 23 insertions(+), 6 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 00a6baf0df2..663badc1cba 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -79,8 +79,8 @@ action_trace apply_context::exec_one() t.block_num = control.pending_block_state()->block_num; t.block_time = control.pending_block_time(); t.producer_block_id = control.pending_producer_block_id(); - t.account_ram_delta = std::move( _account_ram_delta ); - _account_ram_delta.clear(); + t.account_ram_deltas = std::move( _account_ram_deltas ); + _account_ram_deltas.clear(); t.act = act; t.console = _pending_console_output.str(); @@ -641,7 +641,14 @@ uint64_t apply_context::next_auth_sequence( account_name actor ) { void apply_context::add_ram_usage( account_name account, int64_t ram_delta ) { 
trx_context.add_ram_usage( account, ram_delta ); - _account_ram_delta[account] += ram_delta; + + account_delta delta{account, ram_delta}; + auto itr = _account_ram_deltas.find( delta ); + if( itr == _account_ram_deltas.end() ) { + _account_ram_deltas.emplace( delta ); + } else { + itr->delta += ram_delta; + } } diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index 51bc4c1744b..8a4f98a7caa 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -609,7 +609,7 @@ class apply_context { vector _inline_actions; ///< queued inline messages vector _cfa_inline_actions; ///< queued inline messages std::ostringstream _pending_console_output; - flat_map _account_ram_delta; + flat_set _account_ram_deltas; ///< flat_set of account_delta so json is an array of objects //bytes _cached_trx; }; diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index aadcd47947f..e4f27579756 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -10,6 +10,13 @@ namespace eosio { namespace chain { + struct account_delta { + account_name account; + int64_t delta; + + friend bool operator<( const account_delta& lhs, const account_delta& rhs ) { return lhs.account < rhs.account; } + }; + struct base_action_trace { base_action_trace( const action_receipt& r ):receipt(r){} base_action_trace(){} @@ -25,7 +32,7 @@ namespace eosio { namespace chain { uint32_t block_num = 0; block_timestamp_type block_time; fc::optional producer_block_id; - flat_map account_ram_delta; + flat_set account_ram_deltas; }; struct action_trace : public base_action_trace { @@ -55,9 +62,12 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain +FC_REFLECT( eosio::chain::account_delta, + (account)(delta) ) + FC_REFLECT( eosio::chain::base_action_trace, 
(receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id) - (block_num)(block_time)(producer_block_id)(account_ram_delta) ) + (block_num)(block_time)(producer_block_id)(account_ram_deltas) ) FC_REFLECT_DERIVED( eosio::chain::action_trace, (eosio::chain::base_action_trace), (inline_traces) ) From a972330204e8042b4ae3cb112e630d841f54006d Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 12 Sep 2018 14:43:04 -0500 Subject: [PATCH 121/194] Changed validating_tester to allow setting trusted_producers and to manually produce blocks on the main chain and then manually pass the block to the validating_node. GH #5268 --- .../testing/include/eosio/testing/tester.hpp | 22 +++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index d502adefd15..e58775b84cd 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -308,14 +308,17 @@ namespace eosio { namespace testing { try { if( num_blocks_to_producer_before_shutdown > 0 ) produce_blocks( num_blocks_to_producer_before_shutdown ); - BOOST_REQUIRE_EQUAL( validate(), true ); + if (!skip_validate) + BOOST_REQUIRE_EQUAL( validate(), true ); } catch( const fc::exception& e ) { wdump((e.to_detail_string())); } } controller::config vcfg; - validating_tester() { + static controller::config default_config() { + fc::temp_directory tempdir; + controller::config vcfg; vcfg.blocks_dir = tempdir.path() / std::string("v_").append(config::default_blocks_dir_name); vcfg.state_dir = tempdir.path() / std::string("v_").append(config::default_state_dir_name); vcfg.state_size = 1024*1024*8; @@ -333,7 +336,13 @@ namespace eosio { namespace testing { else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) vcfg.wasm_runtime = chain::wasm_interface::vm_type::wavm; } + return vcfg; + } + + 
validating_tester(const flat_set& trusted_producers = flat_set()) { + vcfg = default_config(); + vcfg.trusted_producers = trusted_producers; validating_node = std::make_unique(vcfg); validating_node->startup(); @@ -362,6 +371,14 @@ namespace eosio { namespace testing { return sb; } + signed_block_ptr produce_block_no_validation( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0 /*skip_missed_block_penalty*/ ) { + return _produce_block(skip_time, false, skip_flag | 2); + } + + void validate_push_block(const signed_block_ptr& sb) { + validating_node->push_block( sb ); + } + signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0 /*skip_missed_block_penalty*/ )override { control->abort_block(); auto sb = _produce_block(skip_time, true, skip_flag | 2); @@ -393,6 +410,7 @@ namespace eosio { namespace testing { unique_ptr validating_node; uint32_t num_blocks_to_producer_before_shutdown = 0; + bool skip_validate = false; }; /** From 05bb716ffe2550ac89441127a89822df7fd18ade Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 12 Sep 2018 14:44:34 -0500 Subject: [PATCH 122/194] Added tests to verify if a block with a invalid transaction was accepted by a chain when the block was produced by a trusted producer. 
GH #5268 --- unittests/block_tests.cpp | 114 +++++++++++++++++++++++++++++++++++++- 1 file changed, 113 insertions(+), 1 deletion(-) diff --git a/unittests/block_tests.cpp b/unittests/block_tests.cpp index f196dbdae93..6c4129510c3 100644 --- a/unittests/block_tests.cpp +++ b/unittests/block_tests.cpp @@ -47,7 +47,119 @@ BOOST_AUTO_TEST_CASE(block_with_invalid_tx_test) [] (const fc::exception &e)->bool { return e.code() == account_name_exists_exception::code_value ; }) ; - + +} + +std::pair corrupt_trx_in_block(validating_tester& main, account_name act_name) { + // First we create a valid block with valid transaction + main.create_account(act_name); + signed_block_ptr b = main.produce_block_no_validation(); + + // Make a copy of the valid block and corrupt the transaction + auto copy_b = std::make_shared(*b); + auto signed_tx = copy_b->transactions.back().trx.get().get_signed_transaction(); + // Corrupt one signature + signed_tx.signatures.clear(); + signed_tx.sign(main.get_private_key(act_name, "active"), main.control->get_chain_id()); + + // Replace the valid transaction with the invalid transaction + auto invalid_packed_tx = packed_transaction(signed_tx); + copy_b->transactions.back().trx = invalid_packed_tx; + + // Re-calculate the transaction merkle + vector trx_digests; + const auto& trxs = copy_b->transactions; + trx_digests.reserve( trxs.size() ); + for( const auto& a : trxs ) + trx_digests.emplace_back( a.digest() ); + copy_b->transaction_mroot = merkle( move(trx_digests) ); + + // Re-sign the block + auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), main.control->head_block_state()->blockroot_merkle.get_root() ) ); + auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, main.control->head_block_state()->pending_schedule_hash) ); + copy_b->producer_signature = main.get_private_key(b->producer, "active").sign(sig_digest); + return std::pair(b, copy_b); +} + +// verify that a block with a transaction with an incorrect 
signature, is blindly accepted from a trusted producer +BOOST_AUTO_TEST_CASE(trusted_producer_test) +{ + flat_set trusted_producers = { N(defproducera), N(defproducerc) }; + validating_tester main(trusted_producers); + // only using validating_tester to keep the 2 chains in sync, not to validate that the validating_node matches the main node, + // since it won't be + main.skip_validate = true; + + // First we create a valid block with valid transaction + std::set producers = { N(defproducera), N(defproducerb), N(defproducerc), N(defproducerd) }; + for (auto prod : producers) + main.create_account(prod); + auto b = main.produce_block(); + + std::vector schedule(producers.cbegin(), producers.cend()); + auto trace = main.set_producers(schedule); + + while (b->producer != N(defproducera)) { + b = main.produce_block(); + } + + auto blocks = corrupt_trx_in_block(main, N(tstproducera)); + main.validate_push_block( blocks.second ); +} + +// like trusted_producer_test, except verify that any entry in the trusted_producer list is accepted +BOOST_AUTO_TEST_CASE(trusted_producer_verify_2nd_test) +{ + flat_set trusted_producers = { N(defproducera), N(defproducerc) }; + validating_tester main(trusted_producers); + // only using validating_tester to keep the 2 chains in sync, not to validate that the validating_node matches the main node, + // since it won't be + main.skip_validate = true; + + // First we create a valid block with valid transaction + std::set producers = { N(defproducera), N(defproducerb), N(defproducerc), N(defproducerd) }; + for (auto prod : producers) + main.create_account(prod); + auto b = main.produce_block(); + + std::vector schedule(producers.cbegin(), producers.cend()); + auto trace = main.set_producers(schedule); + + while (b->producer != N(defproducerc)) { + b = main.produce_block(); + } + + auto blocks = corrupt_trx_in_block(main, N(tstproducera)); + main.validate_push_block( blocks.second ); +} + +// verify that a block with a transaction with an 
incorrect signature, is rejected if it is not from a trusted producer +BOOST_AUTO_TEST_CASE(untrusted_producer_test) +{ + flat_set trusted_producers = { N(defproducera), N(defproducerc) }; + validating_tester main(trusted_producers); + // only using validating_tester to keep the 2 chains in sync, not to validate that the validating_node matches the main node, + // since it won't be + main.skip_validate = true; + + // First we create a valid block with valid transaction + std::set producers = { N(defproducera), N(defproducerb), N(defproducerc), N(defproducerd) }; + for (auto prod : producers) + main.create_account(prod); + auto b = main.produce_block(); + + std::vector schedule(producers.cbegin(), producers.cend()); + auto trace = main.set_producers(schedule); + + while (b->producer != N(defproducerb)) { + b = main.produce_block(); + } + + auto blocks = corrupt_trx_in_block(main, N(tstproducera)); + BOOST_REQUIRE_EXCEPTION(main.validate_push_block( blocks.second ), fc::exception , + [] (const fc::exception &e)->bool { + return e.code() == unsatisfied_authorization::code_value ; + }) ; } BOOST_AUTO_TEST_SUITE_END() From e463037fde237e6a2633fcdf1a1ae1ecbd77684e Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Wed, 12 Sep 2018 18:37:15 -0400 Subject: [PATCH 123/194] abi_serializer: variants --- libraries/chain/abi_serializer.cpp | 27 +++++++ .../chain/include/eosio/chain/abi_def.hpp | 57 ++++++++++++--- .../include/eosio/chain/abi_serializer.hpp | 11 +-- .../chain/include/eosio/chain/exceptions.hpp | 2 + unittests/abi_tests.cpp | 70 +++++++++++++++++++ 5 files changed, 153 insertions(+), 14 deletions(-) diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index 60303a5268f..e2d3f198c2b 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -110,6 +110,7 @@ namespace eosio { namespace chain { actions.clear(); tables.clear(); error_messages.clear(); + variants.clear(); for( const auto& st : abi.structs 
) structs[st.name] = st; @@ -129,6 +130,9 @@ namespace eosio { namespace chain { for( const auto& e : abi.error_messages ) error_messages[e.error_code] = e.error_msg; + for( const auto& v : abi.variants.value ) + variants[v.name] = v; + /** * The ABI vector may contain duplicates which would make it * an invalid ABI @@ -138,6 +142,7 @@ namespace eosio { namespace chain { EOS_ASSERT( actions.size() == abi.actions.size(), duplicate_abi_action_def_exception, "duplicate action definition detected" ); EOS_ASSERT( tables.size() == abi.tables.size(), duplicate_abi_table_def_exception, "duplicate table definition detected" ); EOS_ASSERT( error_messages.size() == abi.error_messages.size(), duplicate_abi_err_msg_def_exception, "duplicate error message definition detected" ); + EOS_ASSERT( variants.size() == abi.variants.value.size(), duplicate_abi_variant_def_exception, "duplicate variant definition detected" ); validate(deadline, max_serialization_time); } @@ -190,6 +195,7 @@ namespace eosio { namespace chain { if( built_in_types.find(type) != built_in_types.end() ) return true; if( typedefs.find(type) != typedefs.end() ) return _is_type(typedefs.find(type)->second, recursion_depth, deadline, max_serialization_time); if( structs.find(type) != structs.end() ) return true; + if( variants.find(type) != variants.end() ) return true; return false; } @@ -230,6 +236,12 @@ namespace eosio { namespace chain { EOS_ASSERT(_is_type(field.type, 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",field.type) ); } FC_CAPTURE_AND_RETHROW( (field) ) } } FC_CAPTURE_AND_RETHROW( (s) ) } + for( const auto& s : variants ) { try { + for( const auto& type : s.second.types ) { try { + EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + EOS_ASSERT(_is_type(type, 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",type) ); + } 
FC_CAPTURE_AND_RETHROW( (type) ) } + } FC_CAPTURE_AND_RETHROW( (s) ) } for( const auto& a : actions ) { try { EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); EOS_ASSERT(_is_type(a.second, 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",a.second) ); @@ -297,6 +309,14 @@ namespace eosio { namespace chain { char flag; fc::raw::unpack(stream, flag); return flag ? _binary_to_variant(ftype, stream, recursion_depth, deadline, max_serialization_time) : fc::variant(); + } else { + auto v = variants.find(rtype); + if( v != variants.end() ) { + fc::unsigned_int select; + fc::raw::unpack(stream, select); + EOS_ASSERT( (size_t)select < v->second.types.size(), unpack_exception, "Invalid packed variant" ); + return vector{v->second.types[select], _binary_to_variant(v->second.types[select], stream, recursion_depth, deadline, max_serialization_time)}; + } } fc::mutable_variant_object mvo; @@ -330,6 +350,13 @@ namespace eosio { namespace chain { for (const auto& var : vars) { _variant_to_binary(fundamental_type(rtype), var, ds, recursion_depth, deadline, max_serialization_time); } + } else if ( variants.find(rtype) != variants.end() ) { + EOS_ASSERT( var.is_array() && var.size() == 2 && var[size_t(0)].is_string(), abi_exception, "expected array containing variant" ); + auto& v = variants.find(rtype)->second; + auto it = find(v.types.begin(), v.types.end(), var[size_t(0)].get_string()); + EOS_ASSERT( it != v.types.end(), abi_exception, "type is not valid within this variant" ); + fc::raw::pack(ds, fc::unsigned_int(it - v.types.begin())); + _variant_to_binary( *it, var[size_t(1)], ds, recursion_depth, deadline, max_serialization_time ); } else { const auto& st = get_struct(rtype); diff --git a/libraries/chain/include/eosio/chain/abi_def.hpp b/libraries/chain/include/eosio/chain/abi_def.hpp index 3782c3f7e5b..89f4224d2e9 100644 --- 
a/libraries/chain/include/eosio/chain/abi_def.hpp +++ b/libraries/chain/include/eosio/chain/abi_def.hpp @@ -94,6 +94,16 @@ struct error_message { string error_msg; }; +struct variant_def { + type_name name; + vector types; +}; + +template +struct may_not_exist { + T value{}; +}; + struct abi_def { abi_def() = default; abi_def(const vector& types, const vector& structs, const vector& actions, const vector& tables, const vector& clauses, const vector& error_msgs) @@ -106,14 +116,15 @@ struct abi_def { ,error_messages(error_msgs) {} - string version = "eosio::abi/1.0"; - vector types; - vector structs; - vector actions; - vector tables; - vector ricardian_clauses; - vector error_messages; - extensions_type abi_extensions; + string version = "eosio::abi/1.0"; + vector types; + vector structs; + vector actions; + vector tables; + vector ricardian_clauses; + vector error_messages; + extensions_type abi_extensions; + may_not_exist> variants; }; abi_def eosio_contract_abi(const abi_def& eosio_system_abi); @@ -121,6 +132,33 @@ vector common_type_defs(); } } /// namespace eosio::chain +namespace fc { + +template +datastream& operator << (datastream& s, const eosio::chain::may_not_exist& v) { + raw::pack(s, v.value); + return s; +} + +template +datastream& operator >> (datastream& s, eosio::chain::may_not_exist& v) { + if (s.remaining()) + raw::unpack(s, v.value); + return s; +} + +template +void to_variant(const eosio::chain::may_not_exist& e, fc::variant& v) { + to_variant( e.value, v); +} + +template +void from_variant(const fc::variant& v, eosio::chain::may_not_exist& e) { + from_variant( v, e.value ); +} + +} // namespace fc + FC_REFLECT( eosio::chain::type_def , (new_type_name)(type) ) FC_REFLECT( eosio::chain::field_def , (name)(type) ) FC_REFLECT( eosio::chain::struct_def , (name)(base)(fields) ) @@ -128,5 +166,6 @@ FC_REFLECT( eosio::chain::action_def , (name)(type)(ricard FC_REFLECT( eosio::chain::table_def , (name)(index_type)(key_names)(key_types)(type) ) 
FC_REFLECT( eosio::chain::clause_pair , (id)(body) ) FC_REFLECT( eosio::chain::error_message , (error_code)(error_msg) ) +FC_REFLECT( eosio::chain::variant_def , (name)(types) ) FC_REFLECT( eosio::chain::abi_def , (version)(types)(structs)(actions)(tables) - (ricardian_clauses)(error_messages)(abi_extensions) ) + (ricardian_clauses)(error_messages)(abi_extensions)(variants) ) diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index 1e5bca6b5b3..c2528618390 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -95,11 +95,12 @@ struct abi_serializer { private: - map typedefs; - map structs; - map actions; - map tables; - map error_messages; + map typedefs; + map structs; + map actions; + map tables; + map error_messages; + map variants; map> built_in_types; void configure_built_in_types(); diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index fd49e930af5..5889a582dba 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -416,6 +416,8 @@ namespace eosio { namespace chain { 3015013, "Unpack data exception" ) FC_DECLARE_DERIVED_EXCEPTION( pack_exception, abi_exception, 3015014, "Pack data exception" ) + FC_DECLARE_DERIVED_EXCEPTION( duplicate_abi_variant_def_exception, abi_exception, + 3015015, "Duplicate variant definition in the ABI" ) FC_DECLARE_DERIVED_EXCEPTION( contract_exception, chain_exception, 3160000, "Contract exception" ) diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index 3c6b4d55a01..e4635eb46f2 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -50,6 +50,17 @@ fc::variant verify_byte_round_trip_conversion( const abi_serializer& abis, const return var2; } +void verify_round_trip_conversion( const abi_serializer& abis, const type_name& 
type, const std::string& json, const std::string& hex ) +{ + auto var = fc::json::from_string(json); + auto bytes = abis.variant_to_binary(type, var, max_serialization_time); + BOOST_REQUIRE_EQUAL(fc::to_hex(bytes), hex); + auto var2 = abis.binary_to_variant(type, bytes, max_serialization_time); + BOOST_REQUIRE_EQUAL(fc::json::to_string(var2), json); + auto bytes2 = abis.variant_to_binary(type, var2, max_serialization_time); + BOOST_REQUIRE_EQUAL(fc::to_hex(bytes2), hex); +} + auto get_resolver(const abi_def& abi = abi_def()) { return [&abi](const account_name &name) -> optional { @@ -3471,4 +3482,63 @@ BOOST_AUTO_TEST_CASE(abi_deep_structs_validate) } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE(variants) +{ + auto duplicate_variant_abi = R"({ + "variants": [ + {"name": "v1", "types": ["int8", "string", "bool"]}, + {"name": "v1", "types": ["int8", "string", "bool"]}, + ], + })"; + + auto variant_abi_invalid_type = R"({ + "variants": [ + {"name": "v1", "types": ["int91", "string", "bool"]}, + ], + })"; + + auto variant_abi = R"({ + "types": [ + {"new_type_name": "foo", "type": "s"}, + {"new_type_name": "bar", "type": "s"}, + ], + "structs": [ + {"name": "s", "base": "", "fields": [ + {"name": "i0", "type": "int8"}, + {"name": "i1", "type": "int8"}, + ]} + ], + "variants": [ + {"name": "v1", "types": ["int8", "string", "int16"]}, + {"name": "v2", "types": ["foo", "bar"]}, + ], + })"; + + try { + abi_serializer abis( fc::json::from_string(variant_abi).as(), max_serialization_time ); + + // duplicate variant definition detected + BOOST_CHECK_THROW( abi_serializer( fc::json::from_string(duplicate_variant_abi).as(), max_serialization_time ), duplicate_abi_variant_def_exception ); + + // invalid_type_inside_abi + BOOST_CHECK_THROW( abi_serializer( fc::json::from_string(variant_abi_invalid_type).as(), max_serialization_time ), invalid_type_inside_abi ); + + // expected array containing variant + BOOST_CHECK_THROW( abis.variant_to_binary("v1", 
fc::json::from_string(R"(9)"), max_serialization_time), abi_exception ); + BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"([4])"), max_serialization_time), abi_exception ); + BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"([4, 5])"), max_serialization_time), abi_exception ); + BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"(["4", 5, 6])"), max_serialization_time), abi_exception ); + + // type is not valid within this variant + BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"(["int9", 21])"), max_serialization_time), abi_exception ); + + verify_round_trip_conversion(abis, "v1", R"(["int8",21])", "0015"); + verify_round_trip_conversion(abis, "v1", R"(["string","abcd"])", "010461626364"); + verify_round_trip_conversion(abis, "v1", R"(["int16",3])", "020300"); + verify_round_trip_conversion(abis, "v1", R"(["int16",4])", "020400"); + verify_round_trip_conversion(abis, "v2", R"(["foo",{"i0":5,"i1":6}])", "000506"); + verify_round_trip_conversion(abis, "v2", R"(["bar",{"i0":5,"i1":6}])", "010506"); + } FC_LOG_AND_RETHROW() +} + BOOST_AUTO_TEST_SUITE_END() From 3a95b491193192d23c379ea468b94c80527e89fb Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 12 Sep 2018 19:41:21 -0400 Subject: [PATCH 124/194] fix dereference empty optional bug in api_tests/transaction_tests The applied_transaction signal can be given a transaction_trace for a failed input transaction in which the receipt optional is empty. Disconnect the connection to the applied_transaction signal that was only meant for the send_transaction_trigger_error_handler test after that particular unit test is completed. This ensures that the stale signal handling lambda function is not called when applying the later transactions in the test. 
Furthermore, the signal handler should still check if the receipt is empty before attempting to dereference it even for the send_transaction_trigger_error_handler unit test, since the initial transaction to send the deferred transaction could fail, and it is better for the official failure in the Boost tests to be from BOOST_REQUIRE(trace) rather than from a SIGABRT. --- unittests/api_tests.cpp | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index b80683b211f..655c1b18d77 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -1013,15 +1013,16 @@ BOOST_FIXTURE_TEST_CASE(transaction_tests, TESTER) { try { ); { - produce_blocks(10); - transaction_trace_ptr trace; - auto c = control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t && t->receipt->status != transaction_receipt::executed) { trace = t; } } ); + produce_blocks(10); + transaction_trace_ptr trace; + auto c = control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t && t->receipt && t->receipt->status != transaction_receipt::executed) { trace = t; } } ); - // test error handling on deferred transaction failure - CALL_TEST_FUNCTION(*this, "test_transaction", "send_transaction_trigger_error_handler", {}); + // test error handling on deferred transaction failure + CALL_TEST_FUNCTION(*this, "test_transaction", "send_transaction_trigger_error_handler", {}); - BOOST_CHECK(trace); - BOOST_CHECK_EQUAL(trace->receipt->status, transaction_receipt::soft_fail); + BOOST_REQUIRE(trace); + BOOST_CHECK_EQUAL(trace->receipt->status, transaction_receipt::soft_fail); + c.disconnect(); } // test test_transaction_size From 93717b9848901df7e921c47e4efecba14c76b82e Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 12 Sep 2018 18:53:11 -0500 Subject: [PATCH 125/194] Fix exception message wording. 
GH #5633 --- libraries/chain/apply_context.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 0bbfb8e3aec..44499ba0077 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -102,7 +102,7 @@ void apply_context::exec() if( _cfa_inline_actions.size() > 0 || _inline_actions.size() > 0 ) { EOS_ASSERT( recurse_depth < control.get_global_properties().configuration.max_inline_action_depth, - transaction_exception, "inline action recursion depth reached" ); + transaction_exception, "max inline action depth per transaction reached" ); } for( const auto& inline_action : _cfa_inline_actions ) { From 5124e2a5591af9404910206410198591b82f17de Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 12 Sep 2018 20:31:25 -0500 Subject: [PATCH 126/194] Fix unit test for exception message change. GH #5633 --- unittests/api_tests.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index b80683b211f..77a161025d5 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -1041,7 +1041,7 @@ BOOST_FIXTURE_TEST_CASE(transaction_tests, TESTER) { try { // test send_action_recurse BOOST_CHECK_EXCEPTION(CALL_TEST_FUNCTION(*this, "test_transaction", "send_action_recurse", {}), eosio::chain::transaction_exception, [](const eosio::chain::transaction_exception& e) { - return expect_assert_message(e, "inline action recursion depth reached"); + return expect_assert_message(e, "max inline action depth per transaction reached"); } ); From 84443f0459c2f556118194802d8b372cd2bcf35c Mon Sep 17 00:00:00 2001 From: Tengfei Niu Date: Thu, 13 Sep 2018 11:45:13 +0800 Subject: [PATCH 127/194] Fix typo --- programs/cleos/main.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 6c425a24203..db79b16164d 100644 --- 
a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -2002,7 +2002,7 @@ int main( int argc, char** argv ) { getTable->add_option( "-l,--limit", limit, localized("The maximum number of rows to return") ); getTable->add_option( "-k,--key", table_key, localized("Deprecated") ); getTable->add_option( "-L,--lower", lower, localized("JSON representation of lower bound value of key, defaults to first") ); - getTable->add_option( "-U,--upper", upper, localized("JSON representation of upper bound value value of key, defaults to last") ); + getTable->add_option( "-U,--upper", upper, localized("JSON representation of upper bound value of key, defaults to last") ); getTable->add_option( "--index", index_position, localized("Index number, 1 - primary (first), 2 - secondary index (in order defined by multi_index), 3 - third index, etc.\n" "\t\t\t\tNumber or name of index can be specified, e.g. 'secondary' or '2'.")); From c7a5a9e9e8c775cb125571db484aef8db9b71aba Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 13 Sep 2018 08:32:52 -0500 Subject: [PATCH 128/194] Pull Request change to reset trusted producer light validation flag back to original state. 
GH #5268 --- libraries/chain/controller.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index dd2341ced20..72f756de455 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1020,8 +1020,8 @@ struct controller_impl { void push_block( const signed_block_ptr& b, controller::block_status s ) { EOS_ASSERT(!pending, block_validate_exception, "it is not valid to push a block when there is a pending block"); - auto reset_prod_light_validation = fc::make_scoped_exit([this]() { - conf.trusted_producer_light_validation = false; + auto reset_prod_light_validation = fc::make_scoped_exit([old_value=conf.trusted_producer_light_validation, this]() { + conf.trusted_producer_light_validation = old_value; }); try { EOS_ASSERT( b, block_validate_exception, "trying to push empty block" ); From edcebd040992a029a6e6aa6c9afb6e538af93077 Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Thu, 13 Sep 2018 10:17:54 -0400 Subject: [PATCH 129/194] Test round-tripping abi file containing variant --- unittests/abi_tests.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index e4635eb46f2..bae08c4b9ce 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -3515,7 +3515,11 @@ BOOST_AUTO_TEST_CASE(variants) })"; try { - abi_serializer abis( fc::json::from_string(variant_abi).as(), max_serialization_time ); + // round-trip abi through multiple formats + // json -> variant -> abi_def -> bin + auto bin = fc::raw::pack(fc::json::from_string(variant_abi).as()); + // bin -> abi_def -> variant -> abi_def + abi_serializer abis(variant(fc::raw::unpack(bin)).as(), max_serialization_time ); // duplicate variant definition detected BOOST_CHECK_THROW( abi_serializer( fc::json::from_string(duplicate_variant_abi).as(), max_serialization_time ), duplicate_abi_variant_def_exception ); From 
45ecf195619c501687a5c5f95bcebe4521e0d4bd Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 13 Sep 2018 11:34:03 -0400 Subject: [PATCH 130/194] update fc submodule --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index b9d51de0dc0..347f86ff170 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit b9d51de0dc09ad5e48ef3a6179ec579b351ae6cc +Subproject commit 347f86ff17013f5010f5ad8142ad99c9bd0cd87a From 9654a0eabdd79295540312ab2dc8c7d1d026d187 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 13 Sep 2018 10:59:27 -0500 Subject: [PATCH 131/194] Remove dead code added via bad merge --- libraries/chain/include/eosio/chain/transaction_context.hpp | 1 - libraries/chain/transaction_context.cpp | 1 - 2 files changed, 2 deletions(-) diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index bd0871334df..3175994dedd 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -74,7 +74,6 @@ namespace eosio { namespace chain { vector executed; flat_set bill_to_accounts; flat_set validate_ram_usage; - flat_map account_ram_delta; // reset for each action /// the maximum number of virtual CPU instructions of the transaction that can be safely billed to the billable accounts uint64_t initial_max_billable_cpu = 0; diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index a657109a078..dd58f0364ec 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -386,7 +386,6 @@ namespace eosio { namespace chain { if( ram_delta > 0 ) { validate_ram_usage.insert( account ); } - account_ram_delta[account] += ram_delta; } uint32_t transaction_context::update_billed_cpu_time( fc::time_point now ) { From fd8766471463892f0a5736afdca73be12479010a Mon Sep 17 00:00:00 
2001 From: Kevin Heifner Date: Thu, 13 Sep 2018 11:36:53 -0500 Subject: [PATCH 132/194] Add context_free to base_action_trace --- libraries/chain/apply_context.cpp | 1 + libraries/chain/include/eosio/chain/trace.hpp | 1 + 2 files changed, 2 insertions(+) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 663badc1cba..250563d8382 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -82,6 +82,7 @@ action_trace apply_context::exec_one() t.account_ram_deltas = std::move( _account_ram_deltas ); _account_ram_deltas.clear(); t.act = act; + t.context_free = context_free; t.console = _pending_console_output.str(); trx_context.executed.emplace_back( move(r) ); diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index e4f27579756..faee5cd70a9 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -23,6 +23,7 @@ namespace eosio { namespace chain { action_receipt receipt; action act; + bool context_free = false; fc::microseconds elapsed; uint64_t cpu_usage = 0; string console; From b9a0b9390b76c2984c724f3bc75b9ef6798acf5f Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 13 Sep 2018 11:37:16 -0500 Subject: [PATCH 133/194] Handle delay in keosd startup and fix bad error statement. GH #5199 --- tests/WalletMgr.py | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/tests/WalletMgr.py b/tests/WalletMgr.py index 870cd41a0da..829ac773dd2 100644 --- a/tests/WalletMgr.py +++ b/tests/WalletMgr.py @@ -44,7 +44,7 @@ def launch(self): self.__walletPid=popen.pid # Give keosd time to warm up - time.sleep(1) + time.sleep(2) return True def create(self, name, accounts=None, exitOnError=True): @@ -53,20 +53,31 @@ def create(self, name, accounts=None, exitOnError=True): if Utils.Debug: Utils.Print("Wallet \"%s\" already exists. Returning same." 
% name) return wallet p = re.compile(r'\n\"(\w+)\"\n', re.MULTILINE) - cmd="%s %s wallet create --name %s --to-console" % (Utils.EosClientPath, self.endpointArgs, name) + cmdDesc="wallet create" + cmd="%s %s %s --name %s --to-console" % (Utils.EosClientPath, self.endpointArgs, cmdDesc, name) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) retStr=None - try: - retStr=Utils.checkOutput(cmd.split()) - except subprocess.CalledProcessError as ex: - msg=ex.output.decode("utf-8") - msg="ERROR: Failed to import account owner key %s. %s" % (account.ownerPrivateKey, msg) - if exitOnError: - Utils.errorExit("%s" % (msg)) - Utils.Print("%s" % (msg)) - return None + maxRetryCount=4 + retryCount=0 + while True: + try: + retStr=Utils.checkOutput(cmd.split()) + break + except subprocess.CalledProcessError as ex: + retryCount+=1 + if retryCount Date: Thu, 13 Sep 2018 12:31:43 -0500 Subject: [PATCH 134/194] Add context_free to reflection --- libraries/chain/include/eosio/chain/trace.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index faee5cd70a9..fa5e18b28b3 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -67,7 +67,7 @@ FC_REFLECT( eosio::chain::account_delta, (account)(delta) ) FC_REFLECT( eosio::chain::base_action_trace, - (receipt)(act)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id) + (receipt)(act)(context_free)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id) (block_num)(block_time)(producer_block_id)(account_ram_deltas) ) FC_REFLECT_DERIVED( eosio::chain::action_trace, From 92f7e950f021fe26217c6d5072fd519e5ec3a128 Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Thu, 13 Sep 2018 13:50:49 -0400 Subject: [PATCH 135/194] abi_serializer: binary extensions ($) --- libraries/chain/abi_serializer.cpp | 27 ++++++++--- .../include/eosio/chain/abi_serializer.hpp | 1 + unittests/abi_tests.cpp | 
46 ++++++++++++++++++- 3 files changed, 66 insertions(+), 8 deletions(-) diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index e2d3f198c2b..0f525d96899 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -188,6 +188,13 @@ namespace eosio { namespace chain { } } + type_name abi_serializer::_remove_bin_extension(const type_name& type) { + if( ends_with(type, "$") ) + return type.substr(0, type.size()-1); + else + return type; + } + bool abi_serializer::_is_type(const type_name& rtype, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const { EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); if( ++recursion_depth > max_recursion_depth) return false; @@ -233,7 +240,7 @@ namespace eosio { namespace chain { } for( const auto& field : s.second.fields ) { try { EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); - EOS_ASSERT(_is_type(field.type, 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",field.type) ); + EOS_ASSERT(_is_type(_remove_bin_extension(field.type), 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",field.type) ); } FC_CAPTURE_AND_RETHROW( (field) ) } } FC_CAPTURE_AND_RETHROW( (s) ) } for( const auto& s : variants ) { try { @@ -276,7 +283,9 @@ namespace eosio { namespace chain { _binary_to_variant(resolve_type(st.base), stream, obj, recursion_depth, deadline, max_serialization_time); } for( const auto& field : st.fields ) { - obj( field.name, _binary_to_variant(resolve_type(field.type), stream, recursion_depth, deadline, max_serialization_time) ); + if( !stream.remaining() && ends_with(field.type, "$") ) + continue; + obj( field.name, 
_binary_to_variant(resolve_type(_remove_bin_extension(field.type)), stream, recursion_depth, deadline, max_serialization_time) ); } } @@ -366,11 +375,15 @@ namespace eosio { namespace chain { if( st.base != type_name() ) { _variant_to_binary(resolve_type(st.base), var, ds, recursion_depth, deadline, max_serialization_time); } + bool missing_extension = false; for( const auto& field : st.fields ) { if( vo.contains( string(field.name).c_str() ) ) { - _variant_to_binary(field.type, vo[field.name], ds, recursion_depth, deadline, max_serialization_time); - } - else { + if( missing_extension ) + EOS_THROW( pack_exception, "Unexpected '${f}' in variant object", ("f",field.name) ); + _variant_to_binary(_remove_bin_extension(field.type), vo[field.name], ds, recursion_depth, deadline, max_serialization_time); + } else if( ends_with(field.type, "$") ) { + missing_extension = true; + } else { _variant_to_binary(field.type, fc::variant(), ds, recursion_depth, deadline, max_serialization_time); /// TODO: default construct field and write it out EOS_THROW( pack_exception, "Missing '${f}' in variant object", ("f",field.name) ); @@ -383,7 +396,9 @@ namespace eosio { namespace chain { if (va.size() > 0) { for( const auto& field : st.fields ) { if( va.size() > i ) - _variant_to_binary(field.type, va[i], ds, recursion_depth, deadline, max_serialization_time); + _variant_to_binary(_remove_bin_extension(field.type), va[i], ds, recursion_depth, deadline, max_serialization_time); + else if( ends_with(field.type, "$") ) + break; else _variant_to_binary(field.type, fc::variant(), ds, recursion_depth, deadline, max_serialization_time); ++i; diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index c2528618390..9c2a1e903b9 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -118,6 +118,7 @@ struct abi_serializer { void 
_binary_to_variant(const type_name& type, fc::datastream& stream, fc::mutable_variant_object& obj, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; + static type_name _remove_bin_extension(const type_name& type); bool _is_type(const type_name& type, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; void validate(const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index bae08c4b9ce..f6e009ca715 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -50,17 +50,22 @@ fc::variant verify_byte_round_trip_conversion( const abi_serializer& abis, const return var2; } -void verify_round_trip_conversion( const abi_serializer& abis, const type_name& type, const std::string& json, const std::string& hex ) +void verify_round_trip_conversion( const abi_serializer& abis, const type_name& type, const std::string& json, const std::string& hex, const std::string& expected_json ) { auto var = fc::json::from_string(json); auto bytes = abis.variant_to_binary(type, var, max_serialization_time); BOOST_REQUIRE_EQUAL(fc::to_hex(bytes), hex); auto var2 = abis.binary_to_variant(type, bytes, max_serialization_time); - BOOST_REQUIRE_EQUAL(fc::json::to_string(var2), json); + BOOST_REQUIRE_EQUAL(fc::json::to_string(var2), expected_json); auto bytes2 = abis.variant_to_binary(type, var2, max_serialization_time); BOOST_REQUIRE_EQUAL(fc::to_hex(bytes2), hex); } +void verify_round_trip_conversion( const abi_serializer& abis, const type_name& type, const std::string& json, const std::string& hex ) +{ + verify_round_trip_conversion( abis, type, json, hex, json ); +} + auto get_resolver(const abi_def& abi = abi_def()) { return [&abi](const account_name &name) -> optional { @@ -3545,4 +3550,41 @@ BOOST_AUTO_TEST_CASE(variants) } FC_LOG_AND_RETHROW() } 
+BOOST_AUTO_TEST_CASE(extend) +{ + auto abi = R"({ + "structs": [ + {"name": "s", "base": "", "fields": [ + {"name": "i0", "type": "int8"}, + {"name": "i1", "type": "int8"}, + {"name": "i2", "type": "int8$"}, + {"name": "a", "type": "int8[]$"}, + {"name": "o", "type": "int8?$"}, + ]} + ], + })"; + + try { + abi_serializer abis(fc::json::from_string(abi).as(), max_serialization_time ); + + // missing i1 + BOOST_CHECK_THROW( abis.variant_to_binary("s", fc::json::from_string(R"({"i0":5})"), max_serialization_time), abi_exception ); + + // Unexpected 'a' + BOOST_CHECK_THROW( abis.variant_to_binary("s", fc::json::from_string(R"({"i0":5,"i1":6,"a":[8,9,10]})"), max_serialization_time), pack_exception ); + + verify_round_trip_conversion(abis, "s", R"({"i0":5,"i1":6})", "0506"); + verify_round_trip_conversion(abis, "s", R"({"i0":5,"i1":6,"i2":7})", "050607"); + verify_round_trip_conversion(abis, "s", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10]})", "0506070308090a"); + verify_round_trip_conversion(abis, "s", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10],"o":null})", "0506070308090a00"); + verify_round_trip_conversion(abis, "s", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10],"o":31})", "0506070308090a011f"); + + verify_round_trip_conversion(abis, "s", R"([5,6])", "0506", R"({"i0":5,"i1":6})"); + verify_round_trip_conversion(abis, "s", R"([5,6,7])", "050607", R"({"i0":5,"i1":6,"i2":7})"); + verify_round_trip_conversion(abis, "s", R"([5,6,7,[8,9,10]])", "0506070308090a", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10]})"); + verify_round_trip_conversion(abis, "s", R"([5,6,7,[8,9,10],null])", "0506070308090a00", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10],"o":null})"); + verify_round_trip_conversion(abis, "s", R"([5,6,7,[8,9,10],31])", "0506070308090a011f", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10],"o":31})"); + } FC_LOG_AND_RETHROW() +} + BOOST_AUTO_TEST_SUITE_END() From 9696f102bc62694586f109b77f97e23e7bd7177d Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 13 Sep 2018 13:18:57 -0500 Subject: [PATCH 136/194] 
Added label to allow parallelizing ctest calls and updated buildkite. GH #5223 --- .buildkite/pipeline.yml | 12 ++++++------ tests/CMakeLists.txt | 11 +++++++++++ 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index c4ba85ae46e..031b9abb6c2 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -96,7 +96,7 @@ steps: echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -LE long_running_tests --output-on-failure + ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -j8 -LE _tests --output-on-failure && ctest -L nonparallelizable_tests --output-on-failure retry: automatic: limit: 1 @@ -116,7 +116,7 @@ steps: echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure + cd /data/job/build && ctest -j8 -LE _tests --output-on-failure && ctest -L nonparallelizable_tests --output-on-failure retry: automatic: limit: 1 @@ -140,7 +140,7 @@ steps: echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure + cd /data/job/build && ctest -j8 -LE _tests --output-on-failure && ctest -L nonparallelizable_tests --output-on-failure retry: automatic: limit: 1 @@ -164,7 +164,7 @@ steps: echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure + cd /data/job/build && ctest -j8 -LE _tests --output-on-failure && ctest -L nonparallelizable_tests --output-on-failure retry: automatic: limit: 1 @@ -188,7 +188,7 @@ 
echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure + cd /data/job/build && ctest -j8 -LE _tests --output-on-failure && ctest -L nonparallelizable_tests --output-on-failure retry: automatic: limit: 1 @@ -212,7 +212,7 @@ steps: echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure + cd /data/job/build && ctest -j8 -LE _tests --output-on-failure && ctest -L nonparallelizable_tests --output-on-failure retry: automatic: limit: 1 diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index ff05bad19c2..d599463ecda 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -47,21 +47,32 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/launcher_test.py ${CMAKE_CURRENT_BINA add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_output) add_test(NAME nodeos_sanity_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_sanity_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME nodeos_run_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_run_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME bnet_nodeos_run_test COMMAND tests/nodeos_run_test.py -v --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST bnet_nodeos_run_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST p2p_dawn515_test PROPERTY LABELS nonparallelizable_tests) if(BUILD_MONGO_DB_PLUGIN) add_test(NAME 
nodeos_run_test-mongodb COMMAND tests/nodeos_run_test.py --mongodb -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) + set_property(TEST nodeos_run_test PROPERTY LABELS nonparallelizable_tests) endif() add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 1 -n 4 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST distributed-transactions-test PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-resync COMMAND tests/restart-scenarios-test.py -c resync -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST restart-scenarios-test-resync PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-hard_replay COMMAND tests/restart-scenarios-test.py -c hardReplay -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST restart-scenarios-test-hard_replay PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-none COMMAND tests/restart-scenarios-test.py -c none --kill-sig term -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST restart-scenarios-test-none PROPERTY LABELS nonparallelizable_tests) # TODO: add_test(NAME consensus-validation-malicious-producers COMMAND tests/consensus-validation-malicious-producers.py -w 80 --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME validate_dirty_db_test COMMAND tests/validate-dirty-db.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST validate_dirty_db_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME launcher_test COMMAND tests/launcher_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST launcher_test PROPERTY LABELS nonparallelizable_tests) # Long running tests add_test(NAME nodeos_sanity_lr_test COMMAND 
tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) From 0693210e8f7e674ebba70c9b89816a44326663a4 Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Thu, 13 Sep 2018 14:38:13 -0400 Subject: [PATCH 137/194] Update abi version to 1.1 and check versions --- libraries/chain/abi_serializer.cpp | 2 ++ .../chain/include/eosio/chain/abi_def.hpp | 4 +-- .../chain/include/eosio/chain/exceptions.hpp | 2 ++ unittests/abi_tests.cpp | 29 ++++++++++++++----- 4 files changed, 27 insertions(+), 10 deletions(-) diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index 0f525d96899..b3ad2fced27 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -105,6 +105,8 @@ namespace eosio { namespace chain { void abi_serializer::set_abi(const abi_def& abi, const fc::microseconds& max_serialization_time) { const fc::time_point deadline = fc::time_point::now() + max_serialization_time; + EOS_ASSERT(starts_with(abi.version, "eosio::abi/1."), unsupported_abi_version_exception, "ABI has an unsupported version"); + typedefs.clear(); structs.clear(); actions.clear(); diff --git a/libraries/chain/include/eosio/chain/abi_def.hpp b/libraries/chain/include/eosio/chain/abi_def.hpp index 89f4224d2e9..fdbf1a819ca 100644 --- a/libraries/chain/include/eosio/chain/abi_def.hpp +++ b/libraries/chain/include/eosio/chain/abi_def.hpp @@ -107,7 +107,7 @@ struct may_not_exist { struct abi_def { abi_def() = default; abi_def(const vector& types, const vector& structs, const vector& actions, const vector& tables, const vector& clauses, const vector& error_msgs) - :version("eosio::abi/1.0") + :version("eosio::abi/1.1") ,types(types) ,structs(structs) ,actions(actions) @@ -116,7 +116,7 @@ struct abi_def { ,error_messages(error_msgs) {} - string version = "eosio::abi/1.0"; + string version = "eosio::abi/1.1"; vector types; vector structs; vector actions; diff --git 
a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index 5889a582dba..91467e746f6 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -418,6 +418,8 @@ namespace eosio { namespace chain { 3015014, "Pack data exception" ) FC_DECLARE_DERIVED_EXCEPTION( duplicate_abi_variant_def_exception, abi_exception, 3015015, "Duplicate variant definition in the ABI" ) + FC_DECLARE_DERIVED_EXCEPTION( unsupported_abi_version_exception, abi_exception, + 3015016, "ABI has an unsupported version" ) FC_DECLARE_DERIVED_EXCEPTION( contract_exception, chain_exception, 3160000, "Contract exception" ) diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index f6e009ca715..e89bf215d98 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -99,7 +99,7 @@ fc::variant verify_type_round_trip_conversion( const abi_serializer& abis, const const char* my_abi = R"=====( { - "version": "", + "version": "eosio::abi/1.0", "types": [{ "new_type_name": "type_name", "type": "string" @@ -869,7 +869,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_all_indexes, abi_gen_helper) const char* all_indexes_abi = R"=====( { - "version": "eosio::abi/1.0", + "version": "eosio::abi/1.1", "types": [], "structs": [{ "name": "table1", @@ -1776,7 +1776,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_no_eosioabi_macro, abi_gen_helper) const char* abigen_no_eosioabi_macro_abi = R"=====( { - "version": "eosio::abi/1.0", + "version": "eosio::abi/1.1", "types": [], "structs": [{ "name": "hi", @@ -1992,7 +1992,7 @@ BOOST_AUTO_TEST_CASE(general) {"name":"table2","index_type":"indextype2","key_names":["keyname2"],"key_types":["typename2"],"type":"type2"} ], "abidef":{ - "version": "", + "version": "eosio::abi/1.0", "types" : [{"new_type_name":"new", "type":"old"}], "structs" : [{"name":"struct1", "base":"base1", "fields": [{"name":"name1", "type": "type1"}, {"name":"name2", "type": "type2"}] }], "actions" 
: [{"name":"action1","type":"type1", "ricardian_contract":""}], @@ -2001,7 +2001,7 @@ BOOST_AUTO_TEST_CASE(general) "abi_extensions": [] }, "abidef_arr": [{ - "version": "", + "version": "eosio::abi/1.0", "types" : [{"new_type_name":"new", "type":"old"}], "structs" : [{"name":"struct1", "base":"base1", "fields": [{"name":"name1", "type": "type1"}, {"name":"name2", "type": "type2"}] }], "actions" : [{"name":"action1","type":"type1", "ricardian_contract":""}], @@ -2009,7 +2009,7 @@ BOOST_AUTO_TEST_CASE(general) "ricardian_clauses": [], "abi_extensions": [] },{ - "version": "", + "version": "eosio::abi/1.0", "types" : [{"new_type_name":"new", "type":"old"}], "structs" : [{"name":"struct1", "base":"base1", "fields": [{"name":"name1", "type": "type1"}, {"name":"name2", "type": "type2"}] }], "actions" : [{"name":"action1","type":"type1", "ricardian_contract": ""}], @@ -2390,7 +2390,7 @@ BOOST_AUTO_TEST_CASE(setabi_test) const char* abi_def_abi = R"=====( { - "version": "", + "version": "eosio::abi/1.0", "types": [{ "new_type_name": "type_name", "type": "string" @@ -2522,7 +2522,7 @@ BOOST_AUTO_TEST_CASE(setabi_test) const char* abi_string = R"=====( { - "version": "", + "version": "eosio::abi/1.0", "types": [{ "new_type_name": "account_name", "type": "name" @@ -3490,6 +3490,7 @@ BOOST_AUTO_TEST_CASE(abi_deep_structs_validate) BOOST_AUTO_TEST_CASE(variants) { auto duplicate_variant_abi = R"({ + "version": "eosio::abi/1.0", "variants": [ {"name": "v1", "types": ["int8", "string", "bool"]}, {"name": "v1", "types": ["int8", "string", "bool"]}, @@ -3497,12 +3498,14 @@ BOOST_AUTO_TEST_CASE(variants) })"; auto variant_abi_invalid_type = R"({ + "version": "eosio::abi/1.0", "variants": [ {"name": "v1", "types": ["int91", "string", "bool"]}, ], })"; auto variant_abi = R"({ + "version": "eosio::abi/1.0", "types": [ {"new_type_name": "foo", "type": "s"}, {"new_type_name": "bar", "type": "s"}, @@ -3587,4 +3590,14 @@ BOOST_AUTO_TEST_CASE(extend) } FC_LOG_AND_RETHROW() } 
+BOOST_AUTO_TEST_CASE(version) +{ + try { + BOOST_CHECK_THROW( abi_serializer(fc::json::from_string(R"({"version": ""})").as(), max_serialization_time), unsupported_abi_version_exception ); + BOOST_CHECK_THROW( abi_serializer(fc::json::from_string(R"({"version": "eosio::abi/9.0"})").as(), max_serialization_time), unsupported_abi_version_exception ); + abi_serializer(fc::json::from_string(R"({"version": "eosio::abi/1.0"})").as(), max_serialization_time); + abi_serializer(fc::json::from_string(R"({"version": "eosio::abi/1.1"})").as(), max_serialization_time); + } FC_LOG_AND_RETHROW() +} + BOOST_AUTO_TEST_SUITE_END() From 8698317cbd273b7ba527f0641b05b94bea503f5e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 13 Sep 2018 15:19:39 -0500 Subject: [PATCH 138/194] Add tuple_io include needed by chainbase key streaming --- libraries/chain/authorization_manager.cpp | 1 + libraries/chain/resource_limits.cpp | 1 + libraries/testing/include/eosio/testing/tester.hpp | 1 + 3 files changed, 3 insertions(+) diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index c5b29397bc4..9781f0c9e48 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -12,6 +12,7 @@ #include #include #include +#include namespace eosio { namespace chain { diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp index 4049252f8f8..6d3176c7fb1 100644 --- a/libraries/chain/resource_limits.cpp +++ b/libraries/chain/resource_limits.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include namespace eosio { namespace chain { namespace resource_limits { diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index d502adefd15..1bc3bc9f1d9 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -6,6 +6,7 @@ #include #include #include +#include 
#include From ea2445c374ec9b5dabc280b6515dc1c0f7bcdfe1 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 13 Sep 2018 15:28:55 -0500 Subject: [PATCH 139/194] Use emplace to avoid needless temp --- libraries/chain/apply_context.cpp | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 250563d8382..94aaae08dae 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -643,12 +643,9 @@ uint64_t apply_context::next_auth_sequence( account_name actor ) { void apply_context::add_ram_usage( account_name account, int64_t ram_delta ) { trx_context.add_ram_usage( account, ram_delta ); - account_delta delta{account, ram_delta}; - auto itr = _account_ram_deltas.find( delta ); - if( itr == _account_ram_deltas.end() ) { - _account_ram_deltas.emplace( delta ); - } else { - itr->delta += ram_delta; + auto p = _account_ram_deltas.emplace( account, ram_delta ); + if( !p.second ) { + p.first->delta += ram_delta; } } From 529b11cef18113fbae2acea295d76f534141e257 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 13 Sep 2018 15:29:52 -0500 Subject: [PATCH 140/194] Update commented out code to call correct add_ram_usage for hard fork --- libraries/chain/apply_context.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 94aaae08dae..c89a366b210 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -281,7 +281,7 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a "Replacing a deferred transaction is temporarily disabled." ); // TODO: The logic of the next line needs to be incorporated into the next hard fork. 
- // trx_context.add_ram_usage( ptr->payer, -(config::billable_size_v + ptr->packed_trx.size()) ); + // add_ram_usage( ptr->payer, -(config::billable_size_v + ptr->packed_trx.size()) ); d.modify( *ptr, [&]( auto& gtx ) { gtx.sender = receiver; From dc3043260fc93368d44751e18dc48a0a9364e908 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 13 Sep 2018 15:32:19 -0500 Subject: [PATCH 141/194] Remove unneeded check --- libraries/chain/controller.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 0b6dbf59e20..4e4f3e5f2fc 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1532,8 +1532,6 @@ time_point controller::pending_block_time()const { optional controller::pending_producer_block_id()const { EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); - if( my->pending->_block_status == block_status::incomplete ) - return optional(); return my->pending->_producer_block_id; } From c01474a5353de3af6999cffc4fc5c0f08ce748ff Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 13 Sep 2018 15:34:22 -0500 Subject: [PATCH 142/194] Remove in_current_chain --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 45917fa4e26..e7f3e38663a 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -893,8 +893,7 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr auto block_state_doc = bsoncxx::builder::basic::document{}; block_state_doc.append( kvp( "block_num", b_int32{static_cast(block_num)} ), kvp( "block_id", block_id_str ), - kvp( "validated", b_bool{bs->validated} ), - kvp( "in_current_chain", b_bool{bs->in_current_chain} ) ); + kvp( "validated", b_bool{bs->validated} ) ); const chain::block_header_state& bhs = 
*bs; @@ -983,7 +982,6 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ auto update_doc = make_document( kvp( "$set", make_document( kvp( "irreversible", b_bool{true} ), kvp( "validated", b_bool{bs->validated} ), - kvp( "in_current_chain", b_bool{bs->in_current_chain} ), kvp( "updatedAt", b_date{now} ) ) ) ); _blocks.update_one( make_document( kvp( "_id", ir_block->view()["_id"].get_oid() ) ), update_doc.view() ); @@ -999,7 +997,6 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ auto update_doc = make_document( kvp( "$set", make_document( kvp( "irreversible", b_bool{true} ), kvp( "validated", b_bool{bs->validated} ), - kvp( "in_current_chain", b_bool{bs->in_current_chain} ), kvp( "updatedAt", b_date{now} ) ) ) ); _block_states.update_one( make_document( kvp( "_id", ir_block->view()["_id"].get_oid() ) ), update_doc.view() ); From 3aab9499243b65ec6baf98d9017c90007abb8b48 Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Thu, 13 Sep 2018 16:48:56 -0400 Subject: [PATCH 143/194] get_raw_abi --- plugins/chain_plugin/chain_plugin.cpp | 5 +++-- .../include/eosio/chain_plugin/chain_plugin.hpp | 8 +++++--- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 53c0df50cf2..fe3b4b28332 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1572,9 +1572,10 @@ read_only::get_raw_abi_results read_only::get_raw_abi( const get_raw_abi_params& const auto& d = db.db(); const auto& accnt = d.get(params.account_name); - // todo: fetch hash from table in eosio account + result.abi_hash = fc::sha256::hash( accnt.abi.data(), accnt.abi.size() ); result.code_hash = fc::sha256::hash( accnt.code.data(), accnt.code.size() ); - result.abi = blob{{accnt.abi.begin(), accnt.abi.end()}}; + if( !params.abi_hash || *params.abi_hash != result.abi_hash ) + result.abi = blob{{accnt.abi.begin(), 
accnt.abi.end()}}; return result; } diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 40f395ac9e8..d4f4d49ea6e 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -182,12 +182,14 @@ class read_only { struct get_raw_abi_params { name account_name; + optional abi_hash; }; struct get_raw_abi_results { name account_name; fc::sha256 code_hash; - chain::blob abi; + fc::sha256 abi_hash; + optional abi; }; @@ -696,8 +698,8 @@ FC_REFLECT( eosio::chain_apis::read_only::get_code_hash_params, (account_name) ) FC_REFLECT( eosio::chain_apis::read_only::get_abi_params, (account_name) ) FC_REFLECT( eosio::chain_apis::read_only::get_raw_code_and_abi_params, (account_name) ) FC_REFLECT( eosio::chain_apis::read_only::get_raw_code_and_abi_results, (account_name)(wasm)(abi) ) -FC_REFLECT( eosio::chain_apis::read_only::get_raw_abi_params, (account_name) ) -FC_REFLECT( eosio::chain_apis::read_only::get_raw_abi_results, (account_name)(code_hash)(abi) ) +FC_REFLECT( eosio::chain_apis::read_only::get_raw_abi_params, (account_name)(abi_hash) ) +FC_REFLECT( eosio::chain_apis::read_only::get_raw_abi_results, (account_name)(code_hash)(abi_hash)(abi) ) FC_REFLECT( eosio::chain_apis::read_only::producer_info, (producer_name) ) FC_REFLECT( eosio::chain_apis::read_only::abi_json_to_bin_params, (code)(action)(args) ) FC_REFLECT( eosio::chain_apis::read_only::abi_json_to_bin_result, (binargs) ) From 35f2c830348c38f066da2fae9d1c66bbde6b699a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 13 Sep 2018 16:08:58 -0500 Subject: [PATCH 144/194] Add constructors to account_delta --- libraries/chain/include/eosio/chain/trace.hpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 
fa5e18b28b3..ad02baf5bac 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -11,8 +11,11 @@ namespace eosio { namespace chain { struct account_delta { + account_delta( const account_name& n, int64_t d):account(n),delta(d){} + account_delta(){} + account_name account; - int64_t delta; + int64_t delta = 0; friend bool operator<( const account_delta& lhs, const account_delta& rhs ) { return lhs.account < rhs.account; } }; From 1fe32fbd29d1a50c27a42a7e15f1208689df999b Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 13 Sep 2018 18:06:17 -0500 Subject: [PATCH 145/194] Update chainbase to unkown-key branch --- libraries/chainbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chainbase b/libraries/chainbase index 0a347683902..45d6f7f9afa 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 0a347683902f3ebbf58a4dd6166d68d967e240ce +Subproject commit 45d6f7f9afa6245df5e78e68da15a7972c713ff2 From 8636e7ad351852174b89939b61e43ff21778d56e Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Thu, 13 Sep 2018 19:13:28 -0400 Subject: [PATCH 146/194] abi_serializer: enforce some binary extension rules --- libraries/chain/abi_serializer.cpp | 24 +++++++++---------- .../include/eosio/chain/abi_serializer.hpp | 10 ++++---- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index b3ad2fced27..255ef2250ec 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -345,7 +345,7 @@ namespace eosio { namespace chain { return _binary_to_variant(type, ds, recursion_depth, deadline, max_serialization_time); } - void abi_serializer::_variant_to_binary( const type_name& type, const fc::variant& var, fc::datastream& ds, + void abi_serializer::_variant_to_binary( const type_name& type, const fc::variant& var, fc::datastream& ds, bool 
allow_extensions, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time )const { try { EOS_ASSERT( ++recursion_depth < max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) ); @@ -359,7 +359,7 @@ namespace eosio { namespace chain { vector vars = var.get_array(); fc::raw::pack(ds, (fc::unsigned_int)vars.size()); for (const auto& var : vars) { - _variant_to_binary(fundamental_type(rtype), var, ds, recursion_depth, deadline, max_serialization_time); + _variant_to_binary(fundamental_type(rtype), var, ds, false, recursion_depth, deadline, max_serialization_time); } } else if ( variants.find(rtype) != variants.end() ) { EOS_ASSERT( var.is_array() && var.size() == 2 && var[size_t(0)].is_string(), abi_exception, "expected array containing variant" ); @@ -367,7 +367,7 @@ namespace eosio { namespace chain { auto it = find(v.types.begin(), v.types.end(), var[size_t(0)].get_string()); EOS_ASSERT( it != v.types.end(), abi_exception, "type is not valid within this variant" ); fc::raw::pack(ds, fc::unsigned_int(it - v.types.begin())); - _variant_to_binary( *it, var[size_t(1)], ds, recursion_depth, deadline, max_serialization_time ); + _variant_to_binary( *it, var[size_t(1)], ds, allow_extensions, recursion_depth, deadline, max_serialization_time ); } else { const auto& st = get_struct(rtype); @@ -375,18 +375,18 @@ namespace eosio { namespace chain { const auto& vo = var.get_object(); if( st.base != type_name() ) { - _variant_to_binary(resolve_type(st.base), var, ds, recursion_depth, deadline, max_serialization_time); + _variant_to_binary(resolve_type(st.base), var, ds, false, recursion_depth, deadline, max_serialization_time); } bool missing_extension = false; for( const auto& field : st.fields ) { if( vo.contains( string(field.name).c_str() ) ) { if( missing_extension ) EOS_THROW( pack_exception, "Unexpected '${f}' in variant object", ("f",field.name) ); - 
_variant_to_binary(_remove_bin_extension(field.type), vo[field.name], ds, recursion_depth, deadline, max_serialization_time); - } else if( ends_with(field.type, "$") ) { + _variant_to_binary(_remove_bin_extension(field.type), vo[field.name], ds, allow_extensions && &field == &st.fields.back(), recursion_depth, deadline, max_serialization_time); + } else if( ends_with(field.type, "$") && allow_extensions ) { missing_extension = true; } else { - _variant_to_binary(field.type, fc::variant(), ds, recursion_depth, deadline, max_serialization_time); + _variant_to_binary(field.type, fc::variant(), ds, false, recursion_depth, deadline, max_serialization_time); /// TODO: default construct field and write it out EOS_THROW( pack_exception, "Missing '${f}' in variant object", ("f",field.name) ); } @@ -398,11 +398,11 @@ namespace eosio { namespace chain { if (va.size() > 0) { for( const auto& field : st.fields ) { if( va.size() > i ) - _variant_to_binary(_remove_bin_extension(field.type), va[i], ds, recursion_depth, deadline, max_serialization_time); - else if( ends_with(field.type, "$") ) + _variant_to_binary(_remove_bin_extension(field.type), va[i], ds, allow_extensions && &field == &st.fields.back(), recursion_depth, deadline, max_serialization_time); + else if( ends_with(field.type, "$") && allow_extensions ) break; else - _variant_to_binary(field.type, fc::variant(), ds, recursion_depth, deadline, max_serialization_time); + _variant_to_binary(field.type, fc::variant(), ds, false, recursion_depth, deadline, max_serialization_time); ++i; } } @@ -410,7 +410,7 @@ namespace eosio { namespace chain { } } FC_CAPTURE_AND_RETHROW( (type)(var) ) } - bytes abi_serializer::_variant_to_binary( const type_name& type, const fc::variant& var, + bytes abi_serializer::_variant_to_binary( const type_name& type, const fc::variant& var, bool allow_extensions, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time )const { try { EOS_ASSERT( 
++recursion_depth < max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) ); @@ -421,7 +421,7 @@ namespace eosio { namespace chain { bytes temp( 1024*1024 ); fc::datastream ds(temp.data(), temp.size() ); - _variant_to_binary(type, var, ds, recursion_depth, deadline, max_serialization_time); + _variant_to_binary(type, var, ds, allow_extensions, recursion_depth, deadline, max_serialization_time); temp.resize(ds.tellp()); return temp; } FC_CAPTURE_AND_RETHROW( (type)(var) ) } diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index 9c2a1e903b9..77f3a6267b5 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -53,14 +53,14 @@ struct abi_serializer { return _binary_to_variant(type, binary, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); } bytes variant_to_binary(const type_name& type, const fc::variant& var, const fc::microseconds& max_serialization_time)const { - return _variant_to_binary(type, var, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); + return _variant_to_binary(type, var, true, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); } fc::variant binary_to_variant(const type_name& type, fc::datastream& binary, const fc::microseconds& max_serialization_time)const { return _binary_to_variant(type, binary, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); } void variant_to_binary(const type_name& type, const fc::variant& var, fc::datastream& ds, const fc::microseconds& max_serialization_time)const { - _variant_to_binary(type, var, ds, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); + _variant_to_binary(type, var, ds, true, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); } 
template @@ -107,12 +107,12 @@ struct abi_serializer { fc::variant _binary_to_variant(const type_name& type, const bytes& binary, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; - bytes _variant_to_binary(const type_name& type, const fc::variant& var, + bytes _variant_to_binary(const type_name& type, const fc::variant& var, bool allow_extensions, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; fc::variant _binary_to_variant(const type_name& type, fc::datastream& binary, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; - void _variant_to_binary(const type_name& type, const fc::variant& var, fc::datastream& ds, + void _variant_to_binary(const type_name& type, const fc::variant& var, fc::datastream& ds, bool allow_extensions, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; void _binary_to_variant(const type_name& type, fc::datastream& stream, fc::mutable_variant_object& obj, @@ -468,7 +468,7 @@ namespace impl { if (abi.valid()) { auto type = abi->get_action_type(act.name); if (!type.empty()) { - act.data = std::move( abi->_variant_to_binary( type, data, recursion_depth, deadline, max_serialization_time )); + act.data = std::move( abi->_variant_to_binary( type, data, true, recursion_depth, deadline, max_serialization_time )); valid_empty_data = act.data.empty(); } } From 7ffc3f9ffcb533ab944243c8a6cc29d11f3f4e60 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 13 Sep 2018 18:39:23 -0500 Subject: [PATCH 147/194] Update to master --- libraries/chainbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chainbase b/libraries/chainbase index 45d6f7f9afa..9bfe5043f54 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 45d6f7f9afa6245df5e78e68da15a7972c713ff2 
+Subproject commit 9bfe5043f5484e00f89387091e9b5beb90b88c62 From c229e4d386cc0f5947872f25cd95f5363c9f1c13 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 13 Sep 2018 17:05:50 -0500 Subject: [PATCH 148/194] Moved call to chain_plugin till startup phase. GH #5653 --- plugins/test_control_plugin/test_control_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/test_control_plugin/test_control_plugin.cpp b/plugins/test_control_plugin/test_control_plugin.cpp index 0232b1dcf6a..f2d630c6c59 100644 --- a/plugins/test_control_plugin/test_control_plugin.cpp +++ b/plugins/test_control_plugin/test_control_plugin.cpp @@ -108,7 +108,6 @@ void test_control_plugin_impl::kill_on_head(account_name prod, uint32_t where_in } test_control_plugin::test_control_plugin() -: my(new test_control_plugin_impl(app().get_plugin().chain())) { } @@ -119,6 +118,7 @@ void test_control_plugin::plugin_initialize(const variables_map& options) { } void test_control_plugin::plugin_startup() { + my.reset(new test_control_plugin_impl(app().get_plugin().chain())); my->connect(); } From bad3598e25f8530abbf0907002ed531dcd54d785 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 13 Sep 2018 17:07:18 -0500 Subject: [PATCH 149/194] Fixed test to use if checks and exitError method instead of assert, so that shutdown function can be called. 
GH #5653 --- tests/nodeos_forked_chain_test.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 5a9a1152ee3..4c4105721d5 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -344,13 +344,15 @@ def getMinHeadAndLib(prodNodes): firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=True) # Nodes should not have diverged till the last block - assert(firstDivergence==blockNum) + if firstDivergence!=blockNum: + Utils.errorExit("Expected to diverge at %s, but diverged at %s." % (firstDivergence, blockNum)) blockProducers0=[] blockProducers1=[] #verify that the non producing node is not alive (and populate the producer nodes with current getInfo data to report if #an error occurs) - assert(not nonProdNode.verifyAlive()) + if nonProdNode.verifyAlive(): + Utils.errorExit("Expected the non-producing node to have shutdown.") for prodNode in prodNodes: prodNode.getInfo() @@ -369,7 +371,8 @@ def getMinHeadAndLib(prodNodes): # *** Analyze the producers from the divergence to the lastBlockNum and verify they stay diverged *** firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=True) - assert(firstDivergence==killBlockNum) + if firstDivergence!=killBlockNum: + Utils.errorExit("Expected to diverge at %s, but diverged at %s." 
% (firstDivergence, killBlockNum)) blockProducers0=[] blockProducers1=[] @@ -397,8 +400,8 @@ def getMinHeadAndLib(prodNodes): # *** Analyze the producers from the saved LIB to the current highest head and verify they match now *** - firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=False) - assert(firstDivergence==None) + analyzeBPs(blockProducers0, blockProducers1, expectDivergence=False) + blockProducers0=[] blockProducers1=[] From 73c3fc546d8668b0259c8825122d72cc99ce6cd3 Mon Sep 17 00:00:00 2001 From: Eugene Chung Date: Fri, 14 Sep 2018 08:04:40 -0700 Subject: [PATCH 150/194] check unlock-timeout overflow Should be added if timeout_time.time_since_epoch().count() is negative on Ubuntu 16.04. $ ./build/programs/keyos/keyos --unlock-timeout 9999999999 --http-server-address 127.0.0.1:8900 2018-09-14T15:00:44.373 thread-0 wallet_plugin.cpp:42 plugin_initialize ] initializing wallet plugin 2018-09-14T15:00:44.373 thread-0 wallet_plugin.cpp:68 plugin_initialize ] 3120011 invalid_lock_timeout_exception: Wallet lock timeout is invalid Overflow on timeout_time, specified 9999999999, now 1536937244373317352, timeout_time -6909806830336234264 {"t":"9999999999","now":"1536937244373317352","timeout_time":-6909806830336234264} thread-0 wallet_manager.cpp:47 set_timeout --- plugins/wallet_plugin/wallet_manager.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/wallet_plugin/wallet_manager.cpp b/plugins/wallet_plugin/wallet_manager.cpp index 5c6417f13f4..7b791e3bd8d 100644 --- a/plugins/wallet_plugin/wallet_manager.cpp +++ b/plugins/wallet_plugin/wallet_manager.cpp @@ -37,7 +37,7 @@ void wallet_manager::set_timeout(const std::chrono::seconds& t) { timeout = t; auto now = std::chrono::system_clock::now(); timeout_time = now + timeout; - EOS_ASSERT(timeout_time >= now, invalid_lock_timeout_exception, "Overflow on timeout_time, specified ${t}, now ${now}, timeout_time ${timeout_time}", + EOS_ASSERT(timeout_time >= now && 
timeout_time.time_since_epoch().count() > 0, invalid_lock_timeout_exception, "Overflow on timeout_time, specified ${t}, now ${now}, timeout_time ${timeout_time}", ("t", t.count())("now", now.time_since_epoch().count())("timeout_time", timeout_time.time_since_epoch().count())); } From 4d873ed0d2f23c5f138e821e361e09894487f167 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 14 Sep 2018 12:08:08 -0500 Subject: [PATCH 151/194] Fix shutdown problem where client destructor running after pool destruction. --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index e7f3e38663a..3e26770d418 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -455,7 +455,6 @@ void mongo_db_plugin_impl::consume_blocks() { break; } } - mongo_pool.reset(); ilog("mongo_db_plugin consume thread shutdown gracefully"); } catch (fc::exception& e) { elog("FC Exception while consuming block ${e}", ("e", e.to_string())); @@ -1270,6 +1269,8 @@ mongo_db_plugin_impl::~mongo_db_plugin_impl() { condition.notify_one(); consume_thread.join(); + + mongo_pool.reset(); } catch( std::exception& e ) { elog( "Exception on mongo_db_plugin shutdown of consume thread: ${e}", ("e", e.what())); } From 904cb2d732abba8ef88ec1cfe26aa22255a62b97 Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Fri, 14 Sep 2018 14:07:36 -0400 Subject: [PATCH 152/194] abi_def: don't default version --- contracts/identity/test/identity_test.abi | 1 + contracts/payloadless/payloadless.abi | 1 + contracts/test_ram_limit/test_ram_limit.abi | 1 + .../chain/include/eosio/chain/abi_def.hpp | 5 ++- programs/eosio-abigen/main.cpp | 1 + unittests/abi_tests.cpp | 31 +++++++++++++++++++ 6 files changed, 37 insertions(+), 3 deletions(-) diff --git a/contracts/identity/test/identity_test.abi b/contracts/identity/test/identity_test.abi index 
938d5ef2c8a..a0b450f8ac4 100644 --- a/contracts/identity/test/identity_test.abi +++ b/contracts/identity/test/identity_test.abi @@ -1,4 +1,5 @@ { + "version": "eosio::abi/1.0", "types": [{ "new_type_name": "account_name", "type": "name" diff --git a/contracts/payloadless/payloadless.abi b/contracts/payloadless/payloadless.abi index c68563b7c14..6ba0f1c4aa4 100644 --- a/contracts/payloadless/payloadless.abi +++ b/contracts/payloadless/payloadless.abi @@ -1,4 +1,5 @@ { + "version": "eosio::abi/1.1", "____comment": "This file was generated by eosio-abigen. DO NOT EDIT - 2018-04-19T09:07:16", "types": [], "structs": [{ diff --git a/contracts/test_ram_limit/test_ram_limit.abi b/contracts/test_ram_limit/test_ram_limit.abi index 366fa0d064c..3687c7593ef 100644 --- a/contracts/test_ram_limit/test_ram_limit.abi +++ b/contracts/test_ram_limit/test_ram_limit.abi @@ -1,5 +1,6 @@ { "____comment": "This file was generated by eosio-abigen. DO NOT EDIT - 2018-03-29T02:09:11", + "version": "eosio::abi/1.1", "types": [{ "new_type_name": "account_name", "type": "name" diff --git a/libraries/chain/include/eosio/chain/abi_def.hpp b/libraries/chain/include/eosio/chain/abi_def.hpp index fdbf1a819ca..f00dd19884d 100644 --- a/libraries/chain/include/eosio/chain/abi_def.hpp +++ b/libraries/chain/include/eosio/chain/abi_def.hpp @@ -107,8 +107,7 @@ struct may_not_exist { struct abi_def { abi_def() = default; abi_def(const vector& types, const vector& structs, const vector& actions, const vector& tables, const vector& clauses, const vector& error_msgs) - :version("eosio::abi/1.1") - ,types(types) + :types(types) ,structs(structs) ,actions(actions) ,tables(tables) @@ -116,7 +115,7 @@ struct abi_def { ,error_messages(error_msgs) {} - string version = "eosio::abi/1.1"; + string version = ""; vector types; vector structs; vector actions; diff --git a/programs/eosio-abigen/main.cpp b/programs/eosio-abigen/main.cpp index c68f668588c..689998ca624 100644 --- a/programs/eosio-abigen/main.cpp +++ 
b/programs/eosio-abigen/main.cpp @@ -86,6 +86,7 @@ int main(int argc, const char **argv) { abi_def output; try { vector actions; int result = Tool.run(create_find_macro_factory(contract, actions, abi_context).get()); if(!result) { + output.version = "eosio::abi/1.1"; result = Tool.run(create_factory(abi_verbose, abi_opt_sfs, abi_context, output, contract, actions).get()); if(!result) { abi_serializer abis(output, fc::seconds(1)); // No risk to client side serialization taking a long time diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index e89bf215d98..69e1e498ef9 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -494,6 +494,7 @@ BOOST_AUTO_TEST_CASE(uint_types) const char* currency_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [], "structs": [{ "name": "transfer", @@ -555,6 +556,7 @@ struct abi_gen_helper { std::string stdc_include_param = std::string("-I") + eosiolib_path + "/musl/upstream/include"; abi_def output; + output.version = "eosio::abi/1.1"; std::string contract; std::vector actions; @@ -661,6 +663,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_all_types, abi_gen_helper) const char* all_types_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [], "structs": [{ "name": "test_struct", @@ -808,6 +811,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_double_action, abi_gen_helper) const char* double_action_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [], "structs": [{ "name" : "A", @@ -1026,6 +1030,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_full_table_decl, abi_gen_helper) const char* full_table_decl_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [], "structs": [{ "name" : "table1", @@ -1124,6 +1129,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_template_base, abi_gen_helper) const char* template_base_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [], "structs": [{ "name" : "base32", @@ -1179,6 +1185,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_action_and_table, abi_gen_helper) const char* action_and_table_abi = R"=====( { + 
"version": "eosio::abi/1.1", "types": [], "structs": [{ "name" : "table_action", @@ -1238,6 +1245,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_simple_typedef, abi_gen_helper) const char* simple_typedef_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [{ "new_type_name" : "my_base_alias", "type" : "common_params" @@ -1304,6 +1312,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_field_typedef, abi_gen_helper) const char* field_typedef_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [{ "new_type_name" : "my_complex_field_alias", "type" : "complex_field" @@ -1379,6 +1388,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_vector_of_POD, abi_gen_helper) const char* abigen_vector_of_POD_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [], "structs": [{ "name": "table1", @@ -1452,6 +1462,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_vector_of_structs, abi_gen_helper) const char* abigen_vector_of_structs_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [], "structs": [{ "name": "my_struct", @@ -1557,6 +1568,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_vector_alias, abi_gen_helper) const char* abigen_vector_alias_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [{ "new_type_name": "array_of_rows", "type": "row[]" @@ -1633,6 +1645,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_eosioabi_macro, abi_gen_helper) const char* abigen_eosioabi_macro_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [], "structs": [{ "name": "hi", @@ -1695,6 +1708,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_contract_inheritance, abi_gen_helper) const char* abigen_contract_inheritance_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [], "structs": [{ "name": "hi", @@ -2046,6 +2060,7 @@ BOOST_AUTO_TEST_CASE(abi_cycle) const char* struct_cycle_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [], "structs": [{ "name": "A", @@ -2782,6 +2797,7 @@ BOOST_AUTO_TEST_CASE(packed_transaction) const char* packed_transaction_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [{ "new_type_name": "compression_type", 
"type": "int64" @@ -2864,6 +2880,7 @@ BOOST_AUTO_TEST_CASE(abi_type_repeat) const char* repeat_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [{ "new_type_name": "actor_name", "type": "name" @@ -2924,6 +2941,7 @@ BOOST_AUTO_TEST_CASE(abi_struct_repeat) const char* repeat_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [{ "new_type_name": "actor_name", "type": "name" @@ -2980,6 +2998,7 @@ BOOST_AUTO_TEST_CASE(abi_action_repeat) const char* repeat_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [{ "new_type_name": "actor_name", "type": "name" @@ -3039,6 +3058,7 @@ BOOST_AUTO_TEST_CASE(abi_table_repeat) const char* repeat_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [{ "new_type_name": "actor_name", "type": "name" @@ -3101,6 +3121,7 @@ BOOST_AUTO_TEST_CASE(abi_type_def) // inifinite loop in types const char* repeat_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [{ "new_type_name": "account_name", "type": "name" @@ -3153,6 +3174,7 @@ BOOST_AUTO_TEST_CASE(abi_type_loop) // inifinite loop in types const char* repeat_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [{ "new_type_name": "account_name", "type": "name" @@ -3196,6 +3218,7 @@ BOOST_AUTO_TEST_CASE(abi_type_redefine) // inifinite loop in types const char* repeat_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [{ "new_type_name": "account_name", "type": "account_name" @@ -3236,6 +3259,7 @@ BOOST_AUTO_TEST_CASE(abi_type_redefine_to_name) // inifinite loop in types const char* repeat_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [{ "new_type_name": "name", "type": "name" @@ -3257,6 +3281,7 @@ BOOST_AUTO_TEST_CASE(abi_type_nested_in_vector) // inifinite loop in types const char* repeat_abi = R"=====( { + "version": "eosio::abi/1.1", "types": [], "structs": [{ "name": "store_t", @@ -3282,6 +3307,7 @@ BOOST_AUTO_TEST_CASE(abi_account_name_in_eosio_abi) // inifinite loop in types const char* repeat_abi = R"=====( { + "version": 
"eosio::abi/1.1", "types": [{ "new_type_name": "account_name", "type": "name" @@ -3324,6 +3350,7 @@ BOOST_AUTO_TEST_CASE(abi_large_array) try { const char* abi_str = R"=====( { + "version": "eosio::abi/1.1", "types": [], "structs": [{ "name": "hi", @@ -3361,6 +3388,7 @@ BOOST_AUTO_TEST_CASE(abi_is_type_recursion) try { const char* abi_str = R"=====( { + "version": "eosio::abi/1.1", "types": [ { "new_type_name": "a[]", @@ -3404,6 +3432,7 @@ BOOST_AUTO_TEST_CASE(abi_recursive_structs) try { const char* abi_str = R"=====( { + "version": "eosio::abi/1.1", "types": [], "structs": [ { @@ -3556,6 +3585,7 @@ BOOST_AUTO_TEST_CASE(variants) BOOST_AUTO_TEST_CASE(extend) { auto abi = R"({ + "version": "eosio::abi/1.1", "structs": [ {"name": "s", "base": "", "fields": [ {"name": "i0", "type": "int8"}, @@ -3593,6 +3623,7 @@ BOOST_AUTO_TEST_CASE(extend) BOOST_AUTO_TEST_CASE(version) { try { + BOOST_CHECK_THROW( abi_serializer(fc::json::from_string(R"({})").as(), max_serialization_time), unsupported_abi_version_exception ); BOOST_CHECK_THROW( abi_serializer(fc::json::from_string(R"({"version": ""})").as(), max_serialization_time), unsupported_abi_version_exception ); BOOST_CHECK_THROW( abi_serializer(fc::json::from_string(R"({"version": "eosio::abi/9.0"})").as(), max_serialization_time), unsupported_abi_version_exception ); abi_serializer(fc::json::from_string(R"({"version": "eosio::abi/1.0"})").as(), max_serialization_time); From 45346c226dfbb46c8cf468dfd1fb26405f19b4fc Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 14 Sep 2018 14:06:37 -0500 Subject: [PATCH 153/194] Add diagnostics in Node.getTransaction to walk the blocks on the node and print out when transaction cannot be found. 
GH #5674 --- tests/Node.py | 55 +++++++++++++++++-- tests/WalletMgr.py | 6 +- ...onsensus-validation-malicious-producers.py | 2 +- tests/launcher_test.py | 2 +- tests/nodeos_run_test.py | 4 +- 5 files changed, 56 insertions(+), 13 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index dbb04278f82..0bb7e861440 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -248,10 +248,45 @@ def isBlockFinalized(self, blockNum): """Is blockNum finalized""" return self.isBlockPresent(blockNum, blockType=BlockType.lib) + class BlockWalker: + def __init__(self, node, trans, startBlockNum=None, endBlockNum=None): + self.trans=trans + self.node=node + self.startBlockNum=startBlockNum + self.endBlockNum=endBlockNum + + def walkBlocks(self): + start=None + end=None + blockNum=self.trans["processed"]["action_traces"][0]["block_num"] + # it should be blockNum or later, but just in case the block leading up have any clues... + if self.startBlockNum is not None: + start=self.startBlockNum + else: + start=blockNum-5 + if self.endBlockNum is not None: + end=self.endBlockNum + else: + info=self.node.getInfo() + end=info["head_block_num"] + msg="Original transaction=\n%s\nExpected block_num=%s\n" % (json.dumps(trans, indent=2, sort_keys=True), blockNum) + for blockNum in range(start, end+1): + block=self.node.getBlock(blockNum) + msg+=json.dumps(block, indent=2, sort_keys=True)+"\n" + # pylint: disable=too-many-branches - def getTransaction(self, transId, silentErrors=False, exitOnError=False, delayedRetry=True): + def getTransaction(self, transOrTransId, silentErrors=False, exitOnError=False, delayedRetry=True): + transId=None + trans=None + assert(isinstance(transOrTransId, (str,dict))) + if isinstance(transOrTransId, str): + transId=transOrTransId + else: + trans=transOrTransId + transId=Node.getTransId(trans) exitOnErrorForDelayed=not delayedRetry and exitOnError timeout=3 + blockWalker=None if not self.enableMongo: cmdDesc="get transaction" cmd="%s %s" % (cmdDesc, transId) @@ -260,9 
+295,12 @@ def getTransaction(self, transId, silentErrors=False, exitOnError=False, delayed trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnErrorForDelayed, exitMsg=msg) if trans is not None or not delayedRetry: return trans + if blockWalker is None: + blockWalker=Node.BlockWalker(node, trans) if Utils.Debug: Utils.Print("Could not find transaction with id %s, delay and retry" % (transId)) time.sleep(timeout) + msg+="\nBlock printout -->>\n%s" % blockWalker.walkBlocks(); # either it is there or the transaction has timed out return self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) else: @@ -329,11 +367,16 @@ def isTransInBlock(self, transId, blockId): return False - def getBlockIdByTransId(self, transId, delayedRetry=True): - """Given a transaction Id (string), will return block id (int) containing the transaction""" - assert(transId) - assert(isinstance(transId, str)) - trans=self.getTransaction(transId, exitOnError=True, delayedRetry=delayedRetry) + def getBlockIdByTransId(self, transOrTransId, delayedRetry=True): + """Given a transaction (dictionary) or transaction Id (string), will return the actual block id (int) containing the transaction""" + assert(transOrTransId) + transId=None + assert(isinstance(transOrTransId, (str,dict))) + if isinstance(transOrTransId, str): + transId=transOrTransId + else: + transId=Node.getTransId(transOrTransId) + trans=self.getTransaction(transOrTransId, exitOnError=True, delayedRetry=delayedRetry) refBlockNum=None key="" diff --git a/tests/WalletMgr.py b/tests/WalletMgr.py index 829ac773dd2..c46dd78d6fd 100644 --- a/tests/WalletMgr.py +++ b/tests/WalletMgr.py @@ -72,10 +72,10 @@ def create(self, name, accounts=None, exitOnError=True): continue msg=ex.output.decode("utf-8") - msg="ERROR: Failed to create wallet - %s. %s" % (name, msg) + errorMsg="ERROR: Failed to create wallet - %s. 
%s" % (name, msg) if exitOnError: - Utils.errorExit("%s" % (msg)) - Utils.Print("%s" % (msg)) + Utils.errorExit("%s" % (errorMsg)) + Utils.Print("%s" % (errorMsg)) return None m=p.search(retStr) diff --git a/tests/consensus-validation-malicious-producers.py b/tests/consensus-validation-malicious-producers.py index c92acfde04d..52e1bc27bcf 100755 --- a/tests/consensus-validation-malicious-producers.py +++ b/tests/consensus-validation-malicious-producers.py @@ -328,7 +328,7 @@ def myTest(transWillEnterBlock): return False Print("Get details for transaction %s" % (transId)) - transaction=node2.getTransaction(transId, exitOnError=True) + transaction=node2.getTransaction(trans[1], exitOnError=True) signature=transaction["transaction"]["signatures"][0] blockNum=int(transaction["transaction"]["ref_block_num"]) diff --git a/tests/launcher_test.py b/tests/launcher_test.py index 8581c23d301..4db21658aa8 100755 --- a/tests/launcher_test.py +++ b/tests/launcher_test.py @@ -191,7 +191,7 @@ node.waitForTransInBlock(transId) - transaction=node.getTransaction(transId, exitOnError=True, delayedRetry=False) + transaction=node.getTransaction(trans, exitOnError=True, delayedRetry=False) typeVal=None amountVal=None diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py index 019a50de2ce..f28f62a730a 100755 --- a/tests/nodeos_run_test.py +++ b/tests/nodeos_run_test.py @@ -282,7 +282,7 @@ node.waitForTransInBlock(transId) - transaction=node.getTransaction(transId, exitOnError=True, delayedRetry=False) + transaction=node.getTransaction(trans, exitOnError=True, delayedRetry=False) typeVal=None amountVal=None @@ -467,7 +467,7 @@ raise Print("Test for block decoded packed transaction (issue 2932)") - blockId=node.getBlockIdByTransId(transId) + blockId=node.getBlockIdByTransId(trans[1]) assert(blockId) block=node.getBlock(blockId, exitOnError=True) From e66358cbb75b2b64ab5580462c4c9226a61a4579 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon 
<32485495+spoonincode@users.noreply.github.com> Date: Fri, 14 Sep 2018 15:32:47 -0400 Subject: [PATCH 154/194] (appbase sync) disallow unknown configs in config.ini --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index fa0e7fd9aa8..6e440a7f3c5 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit fa0e7fd9aa8be6ddc0c2f620cae63e58fefafab2 +Subproject commit 6e440a7f3c51f3b8226860663b5eb6446087fed9 From 0530e1ec5e0b8838b611c52241d735fc8b73500a Mon Sep 17 00:00:00 2001 From: Baegjae Sung Date: Tue, 11 Sep 2018 10:47:44 +0900 Subject: [PATCH 155/194] Correct assert message by including delay_max_limit_ms delay_max_limit_ms is defined to print delay_max_limit. However, delay_max_limit_ms was not included in the assert message. As intended, delay_max_limit_ms is included in the assert message. --- libraries/chain/authorization_manager.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index 9781f0c9e48..02207578d98 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -403,7 +403,8 @@ namespace eosio { namespace chain { EOS_ASSERT( checker.satisfied( p.first, p.second ), unsatisfied_authorization, "transaction declares authority '${auth}', " "but does not have signatures for it under a provided delay of ${provided_delay} ms, " - "provided permissions ${provided_permissions}, and provided keys ${provided_keys}", + "provided permissions ${provided_permissions}, provided keys ${provided_keys}, " + "and a delay max limit of ${delay_max_limit_ms} ms", ("auth", p.first) ("provided_delay", provided_delay.count()/1000) ("provided_permissions", provided_permissions) @@ -444,7 +445,8 @@ namespace eosio { namespace chain { EOS_ASSERT( checker.satisfied({account, permission}), unsatisfied_authorization, "permission 
'${auth}' was not satisfied under a provided delay of ${provided_delay} ms, " - "provided permissions ${provided_permissions}, and provided keys ${provided_keys}", + "provided permissions ${provided_permissions}, provided keys ${provided_keys}, " + "and a delay max limit of ${delay_max_limit_ms} ms", ("auth", permission_level{account, permission}) ("provided_delay", provided_delay.count()/1000) ("provided_permissions", provided_permissions) From 22c42e4aa3b7cc3ba73daeff534436ddb09301f7 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 14 Sep 2018 16:43:43 -0400 Subject: [PATCH 156/194] fix bugs in abi_serializer variant to binary --- libraries/chain/abi_serializer.cpp | 25 ++++--- unittests/abi_tests.cpp | 112 +++++++++++++++++++++++++++-- 2 files changed, 118 insertions(+), 19 deletions(-) diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index 255ef2250ec..1c654ddc4d8 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -312,7 +312,7 @@ namespace eosio { namespace chain { vars.emplace_back(std::move(v)); } EOS_ASSERT( vars.size() == size.value, - unpack_exception, + unpack_exception, "packed size does not match unpacked array size, packed size ${p} actual size ${a}", ("p", size)("a", vars.size()) ); return fc::variant( std::move(vars) ); @@ -386,8 +386,6 @@ namespace eosio { namespace chain { } else if( ends_with(field.type, "$") && allow_extensions ) { missing_extension = true; } else { - _variant_to_binary(field.type, fc::variant(), ds, false, recursion_depth, deadline, max_serialization_time); - /// TODO: default construct field and write it out EOS_THROW( pack_exception, "Missing '${f}' in variant object", ("f",field.name) ); } } @@ -395,17 +393,18 @@ namespace eosio { namespace chain { const auto& va = var.get_array(); EOS_ASSERT( st.base == type_name(), invalid_type_inside_abi, "support for base class as array not yet implemented" ); uint32_t i = 0; - if (va.size() > 0) { - for( const 
auto& field : st.fields ) { - if( va.size() > i ) - _variant_to_binary(_remove_bin_extension(field.type), va[i], ds, allow_extensions && &field == &st.fields.back(), recursion_depth, deadline, max_serialization_time); - else if( ends_with(field.type, "$") && allow_extensions ) - break; - else - _variant_to_binary(field.type, fc::variant(), ds, false, recursion_depth, deadline, max_serialization_time); - ++i; - } + for( const auto& field : st.fields ) { + if( va.size() > i ) + _variant_to_binary(_remove_bin_extension(field.type), va[i], ds, allow_extensions && &field == &st.fields.back(), recursion_depth, deadline, max_serialization_time); + else if( ends_with(field.type, "$") && allow_extensions ) + break; + else + EOS_THROW( pack_exception, "Early end to array specifying the fields of struct '${t}'; require input for field '${f}'", + ("t", st.name)("f", field.name) ); + ++i; } + } else { + EOS_THROW( pack_exception, "Failed to serialize struct '${t}' in variant object", ("t", st.name)); } } } FC_CAPTURE_AND_RETHROW( (type)(var) ) } diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index e89bf215d98..b612498a4cb 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include @@ -558,7 +559,7 @@ struct abi_gen_helper { std::string contract; std::vector actions; - + auto extra_args = std::vector{"-fparse-all-comments", "--std=c++14", "--target=wasm32", "-ffreestanding", "-nostdlib", "-nostdlibinc", "-fno-threadsafe-statics", "-fno-rtti", "-fno-exceptions", include_param, boost_include_param, stdcpp_include_param, @@ -567,7 +568,7 @@ struct abi_gen_helper { bool res = runToolOnCodeWithArgs( new find_eosio_abi_macro_action(contract, actions, ""), source, - extra_args + extra_args ); FC_ASSERT(res == true); @@ -3438,7 +3439,16 @@ BOOST_AUTO_TEST_CASE(abi_recursive_structs) "type": "a" } ] - } + }, + { + "name": "hi2", + "base": "", + "fields": [{ + "name": "user", + "type": "name" + } + ] + 
} ], "actions": [{ "name": "hi", @@ -3449,10 +3459,10 @@ BOOST_AUTO_TEST_CASE(abi_recursive_structs) "tables": [] } )====="; - + abi_serializer abis(fc::json::from_string(abi_str).as(), max_serialization_time); - string hi_data = "{\"user\":\"eosio\",\"arg2\":{\"user\":\"1\"}}"; - auto bin = abis.variant_to_binary("hi", fc::json::from_string(hi_data), max_serialization_time); + string hi_data = "{\"user\":\"eosio\"}"; + auto bin = abis.variant_to_binary("hi2", fc::json::from_string(hi_data), max_serialization_time); BOOST_CHECK_THROW( abis.binary_to_variant("hi", bin, max_serialization_time);, fc::exception ); } FC_LOG_AND_RETHROW() @@ -3600,4 +3610,94 @@ BOOST_AUTO_TEST_CASE(version) } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE(abi_serialize_incomplete_json_array) +{ + using eosio::testing::fc_exception_message_starts_with; + + auto abi = R"({ + "version": "eosio::abi/1.0", + "structs": [ + {"name": "s", "base": "", "fields": [ + {"name": "i0", "type": "int8"}, + {"name": "i1", "type": "int8"}, + {"name": "i2", "type": "int8"} + ]} + ], + })"; + + try { + abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"([])"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Early end to array specifying the fields of struct") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"([1,2])"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Early end to array specifying the fields of struct") ); + + verify_round_trip_conversion(abis, "s", R"([1,2,3])", "010203", R"({"i0":1,"i1":2,"i2":3})"); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE(abi_serialize_incomplete_json_object) +{ + using eosio::testing::fc_exception_message_is; + + auto abi = R"({ + "version": "eosio::abi/1.0", + "structs": [ + {"name": "s1", "base": "", "fields": [ + {"name": "i0", "type": "int8"}, + {"name": "i1", 
"type": "int8"} + ]}, + {"name": "s2", "base": "", "fields": [ + {"name": "f0", "type": "s1"} + {"name": "i2", "type": "int8"} + ]} + ], + })"; + + try { + abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({})"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing 'f0' in variant object") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({"f0":{"i0":1}})"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing 'i1' in variant object") ); + + verify_round_trip_conversion(abis, "s2", R"({"f0":{"i0":1,"i1":2},"i2":3})", "010203"); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE(abi_serialize_json_mismatching_type) +{ + using eosio::testing::fc_exception_message_is; + + auto abi = R"({ + "version": "eosio::abi/1.0", + "structs": [ + {"name": "s1", "base": "", "fields": [ + {"name": "i0", "type": "int8"}, + ]}, + {"name": "s2", "base": "", "fields": [ + {"name": "f0", "type": "s1"} + {"name": "i1", "type": "int8"} + ]} + ], + })"; + + try { + abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({"f0":1,"i1":2})"), max_serialization_time), + pack_exception, fc_exception_message_is("Failed to serialize struct 's1' in variant object") ); + + verify_round_trip_conversion(abis, "s2", R"({"f0":{"i0":1},"i1":2})", "0102"); + + } FC_LOG_AND_RETHROW() +} + + BOOST_AUTO_TEST_SUITE_END() From 84149542918c7a7eb388ac20cb2b13ed34bb176a Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 14 Sep 2018 16:50:56 -0400 Subject: [PATCH 157/194] Further parallelize unittest on buildkit Breakout the non-parallelizable tests in to their own buildkite unit so they can easily be run simultaneously with the other parallelizable ones. 
--- .buildkite/pipeline.yml | 152 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 146 insertions(+), 6 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 031b9abb6c2..b26a20f569b 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -96,7 +96,7 @@ steps: echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -j8 -LE _tests --output-on-failure && ctest -L nonparallelizable_tests --output-on-failure + ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -j8 -LE _tests --output-on-failure retry: automatic: limit: 1 @@ -108,6 +108,26 @@ steps: - "build/genesis.json" - "build/config.ini" timeout: 60 + + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Build" && \ + tar -zxf build.tar.gz && \ + echo "--- :m: Starting MongoDB" && \ + $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running tests" && \ + ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure + retry: + automatic: + limit: 1 + label: ":darwin: NP Tests" + agents: + - "role=macos-tester" + artifact_paths: + - "mongod.log" + - "build/genesis.json" + - "build/config.ini" + timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ @@ -116,7 +136,7 @@ steps: echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -j8 -LE _tests --output-on-failure && ctest -L nonparallelizable_tests --output-on-failure + cd /data/job/build && ctest -j8 -LE _tests --output-on-failure retry: automatic: limit: 1 @@ -132,6 +152,30 @@ steps: image: "eosio/ci:ubuntu" workdir: /data/job timeout: 60 + + - command: | + echo "--- 
:arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Build" && \ + tar -zxf build.tar.gz && \ + echo "--- :m: Starting MongoDB" && \ + $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running tests" && \ + cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure + retry: + automatic: + limit: 1 + label: ":ubuntu: NP Tests" + agents: + - "role=linux-tester" + artifact_paths: + - "mongod.log" + - "build/genesis.json" + - "build/config.ini" + plugins: + docker#v1.4.0: + image: "eosio/ci:ubuntu" + workdir: /data/job + timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ @@ -140,7 +184,7 @@ steps: echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -j8 -LE _tests --output-on-failure && ctest -L nonparallelizable_tests --output-on-failure + cd /data/job/build && ctest -j8 -LE _tests --output-on-failure retry: automatic: limit: 1 @@ -157,6 +201,30 @@ steps: workdir: /data/job timeout: 60 + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ + tar -zxf build.tar.gz && \ + echo "--- :m: Starting MongoDB" && \ + $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running tests" && \ + cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure + retry: + automatic: + limit: 1 + label: ":ubuntu: 18.04 NP Tests" + agents: + - "role=linux-tester" + artifact_paths: + - "mongod.log" + - "build/genesis.json" + - "build/config.ini" + plugins: + docker#v1.4.0: + image: "eosio/ci:ubuntu18" + workdir: /data/job + timeout: 60 + - command: | echo "--- :arrow_down: Downloading build directory" && \ buildkite-agent artifact download "build.tar.gz" . 
--step ":fedora: Build" && \ @@ -164,7 +232,7 @@ steps: echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -j8 -LE _tests --output-on-failure && ctest -L nonparallelizable_tests --output-on-failure + cd /data/job/build && ctest -j8 -LE _tests --output-on-failure retry: automatic: limit: 1 @@ -181,6 +249,30 @@ steps: workdir: /data/job timeout: 60 + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":fedora: Build" && \ + tar -zxf build.tar.gz && \ + echo "--- :m: Starting MongoDB" && \ + $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running tests" && \ + cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure + retry: + automatic: + limit: 1 + label: ":fedora: NP Tests" + agents: + - "role=linux-tester" + artifact_paths: + - "mongod.log" + - "build/genesis.json" + - "build/config.ini" + plugins: + docker#v1.4.0: + image: "eosio/ci:fedora" + workdir: /data/job + timeout: 60 + - command: | echo "--- :arrow_down: Downloading build directory" && \ buildkite-agent artifact download "build.tar.gz" . --step ":centos: Build" && \ @@ -188,7 +280,7 @@ steps: echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -j8 -LE _tests --output-on-failure && ctest -L nonparallelizable_tests --output-on-failure + cd /data/job/build && ctest -j8 -LE _tests --output-on-failure retry: automatic: limit: 1 @@ -205,6 +297,30 @@ steps: workdir: /data/job timeout: 60 + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . 
--step ":centos: Build" && \ + tar -zxf build.tar.gz && \ + echo "--- :m: Starting MongoDB" && \ + $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running tests" && \ + cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure + retry: + automatic: + limit: 1 + label: ":centos: NP Tests" + agents: + - "role=linux-tester" + artifact_paths: + - "mongod.log" + - "build/genesis.json" + - "build/config.ini" + plugins: + docker#v1.4.0: + image: "eosio/ci:centos" + workdir: /data/job + timeout: 60 + - command: | echo "--- :arrow_down: Downloading build directory" && \ buildkite-agent artifact download "build.tar.gz" . --step ":aws: Build" && \ @@ -212,7 +328,7 @@ steps: echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -j8 -LE _tests --output-on-failure && ctest -L nonparallelizable_tests --output-on-failure + cd /data/job/build && ctest -j8 -LE _tests --output-on-failure retry: automatic: limit: 1 @@ -228,3 +344,27 @@ steps: image: "eosio/ci:amazonlinux" workdir: /data/job timeout: 60 + + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: Build" && \ + tar -zxf build.tar.gz && \ + echo "--- :m: Starting MongoDB" && \ + $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running tests" && \ + cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure + retry: + automatic: + limit: 1 + label: ":aws: NP Tests" + agents: + - "role=linux-tester" + artifact_paths: + - "mongod.log" + - "build/genesis.json" + - "build/config.ini" + plugins: + docker#v1.4.0: + image: "eosio/ci:amazonlinux" + workdir: /data/job + timeout: 60 From d0b43a5d24296b89cf2a78cc3bb8681367e7f04e Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 14 Sep 2018 15:35:54 -0500 Subject: [PATCH 158/194] Fixed error in previous commit needed to pass self. GH #5674 --- tests/Node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Node.py b/tests/Node.py index 0bb7e861440..a6d8c5005d2 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -296,7 +296,7 @@ def getTransaction(self, transOrTransId, silentErrors=False, exitOnError=False, if trans is not None or not delayedRetry: return trans if blockWalker is None: - blockWalker=Node.BlockWalker(node, trans) + blockWalker=Node.BlockWalker(self, trans) if Utils.Debug: Utils.Print("Could not find transaction with id %s, delay and retry" % (transId)) time.sleep(timeout) From 56d7d27bbe14469a23591b70a3d453cff139b263 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 14 Sep 2018 16:59:29 -0500 Subject: [PATCH 159/194] Moved trusted_producer_light_validation flag out of config to impl and added trusted_producer to reflection macro. 
GH #5268 --- libraries/chain/controller.cpp | 9 +++++---- libraries/chain/include/eosio/chain/controller.hpp | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 72f756de455..7bae8b75207 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -106,6 +106,7 @@ struct controller_impl { db_read_mode read_mode = db_read_mode::SPECULATIVE; bool in_trx_requiring_checks = false; ///< if true, checks that are normally skipped on replay (e.g. auth checks) cannot be skipped optional subjective_cpu_leeway; + bool trusted_producer_light_validation = false; typedef pair handler_key; map< account_name, map > apply_handlers; @@ -1020,8 +1021,8 @@ struct controller_impl { void push_block( const signed_block_ptr& b, controller::block_status s ) { EOS_ASSERT(!pending, block_validate_exception, "it is not valid to push a block when there is a pending block"); - auto reset_prod_light_validation = fc::make_scoped_exit([old_value=conf.trusted_producer_light_validation, this]() { - conf.trusted_producer_light_validation = old_value; + auto reset_prod_light_validation = fc::make_scoped_exit([old_value=trusted_producer_light_validation, this]() { + trusted_producer_light_validation = old_value; }); try { EOS_ASSERT( b, block_validate_exception, "trying to push empty block" ); @@ -1030,7 +1031,7 @@ struct controller_impl { bool trust = !conf.force_all_checks && (s == controller::block_status::irreversible || s == controller::block_status::validated); auto new_header_state = fork_db.add( b, trust ); if (conf.trusted_producers.count(b->producer)) { - conf.trusted_producer_light_validation = true; + trusted_producer_light_validation = true; }; emit( self.accepted_block_header, new_header_state ); // on replay irreversible is not emitted by fork database, so emit it explicitly here @@ -1670,7 +1671,7 @@ bool controller::light_validation_allowed(bool replay_opts_disabled_by_policy) c 
// OR in a signed block and in light validation mode const bool consider_skipping_on_validate = (pb_status == block_status::complete && - (my->conf.block_validation_mode == validation_mode::LIGHT || my->conf.trusted_producer_light_validation)); + (my->conf.block_validation_mode == validation_mode::LIGHT || my->trusted_producer_light_validation)); return consider_skipping_on_replay || consider_skipping_on_validate; } diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 265b2e303fe..6b06de9b80f 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -75,7 +75,6 @@ namespace eosio { namespace chain { flat_set resource_greylist; flat_set trusted_producers; - bool trusted_producer_light_validation = false; }; enum class block_status { @@ -307,4 +306,5 @@ FC_REFLECT( eosio::chain::controller::config, (genesis) (wasm_runtime) (resource_greylist) + (trusted_producers) ) From 7af378a10a07ea521cbb231b08427cd45c0e5bbe Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 14 Sep 2018 17:38:23 -0400 Subject: [PATCH 160/194] Remove old config items from launcher --- programs/eosio-launcher/main.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 6654b581649..0f244f7a968 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -1045,8 +1045,6 @@ launcher_def::write_config_file (tn_node_def &node) { } cfg << "blocks-dir = " << block_dir << "\n"; - cfg << "readonly = 0\n"; - cfg << "send-whole-blocks = true\n"; cfg << "http-server-address = " << host->host_name << ":" << instance.http_port << "\n"; cfg << "http-validate-host = false\n"; if (p2p == p2p_plugin::NET) { From 6a86fe0ff493e3db80acaf922c37c9d05d01acf8 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon 
<32485495+spoonincode@users.noreply.github.com> Date: Fri, 14 Sep 2018 21:36:36 -0400 Subject: [PATCH 161/194] Remove secp256k1 building from dockerfile --- Docker/builder/Dockerfile | 7 ------- 1 file changed, 7 deletions(-) diff --git a/Docker/builder/Dockerfile b/Docker/builder/Dockerfile index 3eec3d9f601..ef8b8a7e03f 100644 --- a/Docker/builder/Dockerfile +++ b/Docker/builder/Dockerfile @@ -56,13 +56,6 @@ RUN wget https://github.com/WebAssembly/binaryen/archive/1.37.21.tar.gz -O - | t && cmake --build build --target install \ && cd .. && rm -rf binaryen-1.37.21 -RUN git clone --depth 1 https://github.com/cryptonomex/secp256k1-zkp \ - && cd secp256k1-zkp \ - && ./autogen.sh \ - && ./configure --prefix=/usr/local \ - && make -j$(nproc) install \ - && cd .. && rm -rf secp256k1-zkp - RUN git clone --depth 1 -b releases/v3.3 https://github.com/mongodb/mongo-cxx-driver \ && cd mongo-cxx-driver/build \ && cmake -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local .. 
\ From b89a003502a3bbab405ad003b1d23c3e4372e01d Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 14 Sep 2018 21:37:08 -0400 Subject: [PATCH 162/194] (fc sync) secp256k1 as submodule --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index a4a4f20a9db..8edb92dd231 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit a4a4f20a9db606319330e605cbead806f965dda5 +Subproject commit 8edb92dd2310108b8eb66d010b84ca4fc9dce898 From b139e68ae7a1d1bf112f88d47ec63387b28d440a Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Sun, 16 Sep 2018 12:04:34 -0400 Subject: [PATCH 163/194] Fix trivial error message print in cleos http --- programs/cleos/httpc.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/cleos/httpc.cpp b/programs/cleos/httpc.cpp index 552863e277b..e2196d2bc9c 100644 --- a/programs/cleos/httpc.cpp +++ b/programs/cleos/httpc.cpp @@ -139,7 +139,7 @@ namespace eosio { namespace client { namespace http { boost::system::error_code ec; auto result = resolver.resolve(tcp::v4(), url.server, url.port, ec); if (ec) { - EOS_THROW(fail_to_resolve_host, "Error resolving \"${server}:${url}\" : ${m}", ("server", url.server)("port",url.port)("m",ec.message())); + EOS_THROW(fail_to_resolve_host, "Error resolving \"${server}:${port}\" : ${m}", ("server", url.server)("port",url.port)("m",ec.message())); } // non error results are guaranteed to return a non-empty range From 8db852576697d31836f9073521fe76a2fe02115b Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Mon, 17 Sep 2018 09:48:57 -0400 Subject: [PATCH 164/194] abi version switcheroo --- contracts/payloadless/payloadless.abi | 2 +- contracts/test_ram_limit/test_ram_limit.abi | 2 +- programs/eosio-abigen/main.cpp | 2 +- unittests/abi_tests.cpp | 68 ++++++++++----------- 4 files changed, 37 insertions(+), 37 
deletions(-) diff --git a/contracts/payloadless/payloadless.abi b/contracts/payloadless/payloadless.abi index 6ba0f1c4aa4..1ea79c6f275 100644 --- a/contracts/payloadless/payloadless.abi +++ b/contracts/payloadless/payloadless.abi @@ -1,5 +1,5 @@ { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "____comment": "This file was generated by eosio-abigen. DO NOT EDIT - 2018-04-19T09:07:16", "types": [], "structs": [{ diff --git a/contracts/test_ram_limit/test_ram_limit.abi b/contracts/test_ram_limit/test_ram_limit.abi index 3687c7593ef..9d3413b8b8e 100644 --- a/contracts/test_ram_limit/test_ram_limit.abi +++ b/contracts/test_ram_limit/test_ram_limit.abi @@ -1,6 +1,6 @@ { "____comment": "This file was generated by eosio-abigen. DO NOT EDIT - 2018-03-29T02:09:11", - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [{ "new_type_name": "account_name", "type": "name" diff --git a/programs/eosio-abigen/main.cpp b/programs/eosio-abigen/main.cpp index 689998ca624..f15cd138ee4 100644 --- a/programs/eosio-abigen/main.cpp +++ b/programs/eosio-abigen/main.cpp @@ -86,7 +86,7 @@ int main(int argc, const char **argv) { abi_def output; try { vector actions; int result = Tool.run(create_find_macro_factory(contract, actions, abi_context).get()); if(!result) { - output.version = "eosio::abi/1.1"; + output.version = "eosio::abi/1.0"; result = Tool.run(create_factory(abi_verbose, abi_opt_sfs, abi_context, output, contract, actions).get()); if(!result) { abi_serializer abis(output, fc::seconds(1)); // No risk to client side serialization taking a long time diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index d63836d2aa5..44677b261c7 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -495,7 +495,7 @@ BOOST_AUTO_TEST_CASE(uint_types) const char* currency_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [], "structs": [{ "name": "transfer", @@ -557,7 +557,7 @@ struct abi_gen_helper { 
std::string stdc_include_param = std::string("-I") + eosiolib_path + "/musl/upstream/include"; abi_def output; - output.version = "eosio::abi/1.1"; + output.version = "eosio::abi/1.0"; std::string contract; std::vector actions; @@ -664,7 +664,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_all_types, abi_gen_helper) const char* all_types_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [], "structs": [{ "name": "test_struct", @@ -812,7 +812,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_double_action, abi_gen_helper) const char* double_action_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [], "structs": [{ "name" : "A", @@ -874,7 +874,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_all_indexes, abi_gen_helper) const char* all_indexes_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [], "structs": [{ "name": "table1", @@ -1031,7 +1031,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_full_table_decl, abi_gen_helper) const char* full_table_decl_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [], "structs": [{ "name" : "table1", @@ -1130,7 +1130,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_template_base, abi_gen_helper) const char* template_base_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [], "structs": [{ "name" : "base32", @@ -1186,7 +1186,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_action_and_table, abi_gen_helper) const char* action_and_table_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [], "structs": [{ "name" : "table_action", @@ -1246,7 +1246,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_simple_typedef, abi_gen_helper) const char* simple_typedef_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [{ "new_type_name" : "my_base_alias", "type" : "common_params" @@ -1313,7 +1313,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_field_typedef, abi_gen_helper) const char* 
field_typedef_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [{ "new_type_name" : "my_complex_field_alias", "type" : "complex_field" @@ -1389,7 +1389,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_vector_of_POD, abi_gen_helper) const char* abigen_vector_of_POD_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [], "structs": [{ "name": "table1", @@ -1463,7 +1463,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_vector_of_structs, abi_gen_helper) const char* abigen_vector_of_structs_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [], "structs": [{ "name": "my_struct", @@ -1569,7 +1569,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_vector_alias, abi_gen_helper) const char* abigen_vector_alias_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [{ "new_type_name": "array_of_rows", "type": "row[]" @@ -1646,7 +1646,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_eosioabi_macro, abi_gen_helper) const char* abigen_eosioabi_macro_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [], "structs": [{ "name": "hi", @@ -1709,7 +1709,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_contract_inheritance, abi_gen_helper) const char* abigen_contract_inheritance_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [], "structs": [{ "name": "hi", @@ -1791,7 +1791,7 @@ BOOST_FIXTURE_TEST_CASE(abigen_no_eosioabi_macro, abi_gen_helper) const char* abigen_no_eosioabi_macro_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [], "structs": [{ "name": "hi", @@ -2061,7 +2061,7 @@ BOOST_AUTO_TEST_CASE(abi_cycle) const char* struct_cycle_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [], "structs": [{ "name": "A", @@ -2798,7 +2798,7 @@ BOOST_AUTO_TEST_CASE(packed_transaction) const char* packed_transaction_abi = R"=====( { - "version": "eosio::abi/1.1", + 
"version": "eosio::abi/1.0", "types": [{ "new_type_name": "compression_type", "type": "int64" @@ -2881,7 +2881,7 @@ BOOST_AUTO_TEST_CASE(abi_type_repeat) const char* repeat_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [{ "new_type_name": "actor_name", "type": "name" @@ -2942,7 +2942,7 @@ BOOST_AUTO_TEST_CASE(abi_struct_repeat) const char* repeat_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [{ "new_type_name": "actor_name", "type": "name" @@ -2999,7 +2999,7 @@ BOOST_AUTO_TEST_CASE(abi_action_repeat) const char* repeat_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [{ "new_type_name": "actor_name", "type": "name" @@ -3059,7 +3059,7 @@ BOOST_AUTO_TEST_CASE(abi_table_repeat) const char* repeat_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [{ "new_type_name": "actor_name", "type": "name" @@ -3122,7 +3122,7 @@ BOOST_AUTO_TEST_CASE(abi_type_def) // inifinite loop in types const char* repeat_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [{ "new_type_name": "account_name", "type": "name" @@ -3175,7 +3175,7 @@ BOOST_AUTO_TEST_CASE(abi_type_loop) // inifinite loop in types const char* repeat_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [{ "new_type_name": "account_name", "type": "name" @@ -3219,7 +3219,7 @@ BOOST_AUTO_TEST_CASE(abi_type_redefine) // inifinite loop in types const char* repeat_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [{ "new_type_name": "account_name", "type": "account_name" @@ -3260,7 +3260,7 @@ BOOST_AUTO_TEST_CASE(abi_type_redefine_to_name) // inifinite loop in types const char* repeat_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [{ "new_type_name": "name", "type": "name" @@ -3282,7 +3282,7 @@ 
BOOST_AUTO_TEST_CASE(abi_type_nested_in_vector) // inifinite loop in types const char* repeat_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [], "structs": [{ "name": "store_t", @@ -3308,7 +3308,7 @@ BOOST_AUTO_TEST_CASE(abi_account_name_in_eosio_abi) // inifinite loop in types const char* repeat_abi = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [{ "new_type_name": "account_name", "type": "name" @@ -3351,7 +3351,7 @@ BOOST_AUTO_TEST_CASE(abi_large_array) try { const char* abi_str = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [], "structs": [{ "name": "hi", @@ -3389,7 +3389,7 @@ BOOST_AUTO_TEST_CASE(abi_is_type_recursion) try { const char* abi_str = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [ { "new_type_name": "a[]", @@ -3433,7 +3433,7 @@ BOOST_AUTO_TEST_CASE(abi_recursive_structs) try { const char* abi_str = R"=====( { - "version": "eosio::abi/1.1", + "version": "eosio::abi/1.0", "types": [], "structs": [ { @@ -3529,7 +3529,7 @@ BOOST_AUTO_TEST_CASE(abi_deep_structs_validate) BOOST_AUTO_TEST_CASE(variants) { auto duplicate_variant_abi = R"({ - "version": "eosio::abi/1.0", + "version": "eosio::abi/1.1", "variants": [ {"name": "v1", "types": ["int8", "string", "bool"]}, {"name": "v1", "types": ["int8", "string", "bool"]}, @@ -3537,14 +3537,14 @@ BOOST_AUTO_TEST_CASE(variants) })"; auto variant_abi_invalid_type = R"({ - "version": "eosio::abi/1.0", + "version": "eosio::abi/1.1", "variants": [ {"name": "v1", "types": ["int91", "string", "bool"]}, ], })"; auto variant_abi = R"({ - "version": "eosio::abi/1.0", + "version": "eosio::abi/1.1", "types": [ {"new_type_name": "foo", "type": "s"}, {"new_type_name": "bar", "type": "s"}, From ecd2dbd392f4ccec7702ad9b8fc16b9756470b4b Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 17 Sep 2018 11:48:29 -0400 Subject: [PATCH 165/194] delete contents inside state directory but 
not the directory itself #5685 --- plugins/chain_plugin/chain_plugin.cpp | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 814999c8ade..3a6688c94af 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -313,6 +313,14 @@ fc::time_point calculate_genesis_timestamp( string tstr ) { return genesis_timestamp; } +void clear_directory_contents( const fc::path& p ) { + using boost::filesystem::directory_iterator; + + for( directory_iterator enditr, itr{p}; itr != enditr; ++itr ) { + fc::remove_all( itr->path() ); + } +} + void chain_plugin::plugin_initialize(const variables_map& options) { ilog("initializing chain plugin"); @@ -456,11 +464,11 @@ void chain_plugin::plugin_initialize(const variables_map& options) { ilog( "Deleting state database and blocks" ); if( options.at( "truncate-at-block" ).as() > 0 ) wlog( "The --truncate-at-block option does not make sense when deleting all blocks." ); - fc::remove_all( my->chain_config->state_dir ); + clear_directory_contents( my->chain_config->state_dir ); fc::remove_all( my->blocks_dir ); } else if( options.at( "hard-replay-blockchain" ).as()) { ilog( "Hard replay requested: deleting state database" ); - fc::remove_all( my->chain_config->state_dir ); + clear_directory_contents( my->chain_config->state_dir ); auto backup_dir = block_log::repair_log( my->blocks_dir, options.at( "truncate-at-block" ).as()); if( fc::exists( backup_dir / config::reversible_blocks_dir_name ) || options.at( "fix-reversible-blocks" ).as()) { @@ -482,7 +490,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) { ilog( "Replay requested: deleting state database" ); if( options.at( "truncate-at-block" ).as() > 0 ) wlog( "The --truncate-at-block option does not work for a regular replay of the blockchain." 
); - fc::remove_all( my->chain_config->state_dir ); + clear_directory_contents( my->chain_config->state_dir ); if( options.at( "fix-reversible-blocks" ).as()) { if( !recover_reversible_blocks( my->chain_config->blocks_dir / config::reversible_blocks_dir_name, my->chain_config->reversible_cache_size )) { From 37a96777c751a71d65e49f87eb1781f0d58d23e9 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Mon, 17 Sep 2018 12:05:17 -0400 Subject: [PATCH 166/194] add deprecation notice to eosiocpp --- tools/eosiocpp.in | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools/eosiocpp.in b/tools/eosiocpp.in index 3e8f84e96d7..dd56f43dd1d 100755 --- a/tools/eosiocpp.in +++ b/tools/eosiocpp.in @@ -150,6 +150,11 @@ function print_help { echo " Generate the ABI specification file [EXPERIMENTAL]" } +function print_deprecation_notice { + echo -e "\033[0;33mWARNING: this tool is deprecated and will be removed in a future release\033[0m" 1>&2 + echo -e "\033[0;33mPlease consider using the EOSIO.CDT (https://github.com/EOSIO/eosio.cdt/)\033[0m" 1>&2 +} + command="" while [[ $# -gt 1 ]] @@ -185,6 +190,8 @@ case $key in esac done +print_deprecation_notice + if [[ "outname" == "$command" ]]; then build_contract $@ elif [[ "newcontract" == "$command" ]]; then From 9f16ed39ca52e9b7ea5d7de1519acf527726dc11 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 17 Sep 2018 12:41:49 -0400 Subject: [PATCH 167/194] Make sure eosio_build.sh script checks recursive submodules --- eosio_build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eosio_build.sh b/eosio_build.sh index 59c76b0b54e..1c32a5ec86f 100755 --- a/eosio_build.sh +++ b/eosio_build.sh @@ -120,7 +120,7 @@ pushd "${SOURCE_DIR}" &> /dev/null - STALE_SUBMODS=$(( $(git submodule status | grep -c "^[+\-]") )) + STALE_SUBMODS=$(( $(git submodule status --recursive | grep -c "^[+\-]") )) if [ $STALE_SUBMODS -gt 0 ]; then printf "\\n\\tgit submodules are not up to 
date.\\n" printf "\\tPlease run the command 'git submodule update --init --recursive'.\\n" From 3590af77ec3099fcb17d210746530cf08be38c81 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 17 Sep 2018 13:45:30 -0400 Subject: [PATCH 168/194] clear_directory_contents should be no-op if folder does not exist #5685 --- plugins/chain_plugin/chain_plugin.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 3a6688c94af..75ce0ca7d0c 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -316,6 +316,9 @@ fc::time_point calculate_genesis_timestamp( string tstr ) { void clear_directory_contents( const fc::path& p ) { using boost::filesystem::directory_iterator; + if( !fc::is_directory( p ) ) + return; + for( directory_iterator enditr, itr{p}; itr != enditr; ++itr ) { fc::remove_all( itr->path() ); } From 15cd9c62f9f9432a01f6ca64ec4ea565bc511b45 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 14 Sep 2018 10:54:38 -0500 Subject: [PATCH 169/194] Add filter-on ability to accept blank action as wildcard --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 22 +++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 3e26770d418..52ca2b45d1c 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -214,11 +214,14 @@ const std::string mongo_db_plugin_impl::account_controls_col = "account_controls bool mongo_db_plugin_impl::filter_include( const chain::action_trace& action_trace ) const { bool include = false; - if( filter_on_star || filter_on.find( {action_trace.receipt.receiver, action_trace.act.name, 0} ) != filter_on.end() ) { + if( filter_on_star || + filter_on.find( {action_trace.receipt.receiver, 0, 0} ) != filter_on.end() || + filter_on.find( {action_trace.receipt.receiver, 
action_trace.act.name, 0} ) != filter_on.end() ) { include = true; } else { for( const auto& a : action_trace.act.authorization ) { - if( filter_on.find( {action_trace.receipt.receiver, action_trace.act.name, a.actor} ) != filter_on.end() ) { + if( filter_on.find( {action_trace.receipt.receiver, 0, a.actor} ) != filter_on.end() || + filter_on.find( {action_trace.receipt.receiver, action_trace.act.name, a.actor} ) != filter_on.end() ) { include = true; break; } @@ -227,14 +230,13 @@ bool mongo_db_plugin_impl::filter_include( const chain::action_trace& action_tra if( !include ) { return false; } - if( filter_out.find( {action_trace.receipt.receiver, 0, 0} ) != filter_out.end() ) { - return false; - } - if( filter_out.find( {action_trace.receipt.receiver, action_trace.act.name, 0} ) != filter_out.end() ) { + if( filter_out.find( {action_trace.receipt.receiver, 0, 0} ) != filter_out.end() || + filter_out.find( {action_trace.receipt.receiver, action_trace.act.name, 0} ) != filter_out.end() ) { return false; } for( const auto& a : action_trace.act.authorization ) { - if( filter_out.find( {action_trace.receipt.receiver, action_trace.act.name, a.actor} ) != filter_out.end() ) { + if( filter_out.find( {action_trace.receipt.receiver, 0, a.actor} ) != filter_out.end() || + filter_out.find( {action_trace.receipt.receiver, action_trace.act.name, a.actor} ) != filter_out.end() ) { return false; } } @@ -1422,9 +1424,9 @@ void mongo_db_plugin::set_program_options(options_description& cli, options_desc ("mongodb-store-action-traces", bpo::value()->default_value(true), "Enables storing action traces in mongodb.") ("mongodb-filter-on", bpo::value>()->composing(), - "Mongodb: Track actions which match receiver:action:actor. Actor may be blank to include all. Receiver and Action may not be blank. Default is * include everything.") + "Mongodb: Track actions which match receiver:action:actor. Actor may be blank to include all. Action and Actor both blank allows all from Receiver. 
Receiver may not be blank.") ("mongodb-filter-out", bpo::value>()->composing(), - "Mongodb: Do not track actions which match receiver:action:actor. Action and Actor both blank excludes all from reciever. Actor blank excludes all from reciever:action. Receiver may not be blank.") + "Mongodb: Do not track actions which match receiver:action:actor. Action and Actor both blank excludes all from Receiver. Actor blank excludes all from receiver:action. Receiver may not be blank.") ; } @@ -1487,7 +1489,7 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options) boost::split( v, s, boost::is_any_of( ":" )); EOS_ASSERT( v.size() == 3, fc::invalid_arg_exception, "Invalid value ${s} for --mongodb-filter-on", ("s", s)); filter_entry fe{v[0], v[1], v[2]}; - EOS_ASSERT( fe.receiver.value && fe.action.value, fc::invalid_arg_exception, + EOS_ASSERT( fe.receiver.value, fc::invalid_arg_exception, "Invalid value ${s} for --mongodb-filter-on", ("s", s)); my->filter_on.insert( fe ); } From 1eb7e9c52570031f885b240cd0d88ee0d6b94399 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 17 Sep 2018 08:38:46 -0500 Subject: [PATCH 170/194] Update doc from peer review --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 52ca2b45d1c..ea3a4a247a6 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -1424,7 +1424,7 @@ void mongo_db_plugin::set_program_options(options_description& cli, options_desc ("mongodb-store-action-traces", bpo::value()->default_value(true), "Enables storing action traces in mongodb.") ("mongodb-filter-on", bpo::value>()->composing(), - "Mongodb: Track actions which match receiver:action:actor. Actor may be blank to include all. Action and Actor both blank allows all from Receiver. 
Receiver may not be blank.") + "Mongodb: Track actions which match receiver:action:actor. Action and/or Actor may be blank to include all. Action and Actor both blank allows all from Receiver. Receiver may not be blank.") ("mongodb-filter-out", bpo::value>()->composing(), "Mongodb: Do not track actions which match receiver:action:actor. Action and Actor both blank excludes all from Receiver. Actor blank excludes all from receiver:action. Receiver may not be blank.") ; From c67c676641437f1564cd81bc2fc39a0045a80300 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 17 Sep 2018 12:46:08 -0500 Subject: [PATCH 171/194] Add wildcard support of receiver --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 58 ++++++++++++--------- 1 file changed, 34 insertions(+), 24 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index ea3a4a247a6..e335eb0f7a0 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -53,11 +53,16 @@ struct filter_entry { name receiver; name action; name actor; - std::tuple key() const { - return std::make_tuple(receiver, action, actor); - } + friend bool operator<( const filter_entry& a, const filter_entry& b ) { - return a.key() < b.key(); + return std::tie( a.receiver, a.action, a.actor ) < std::tie( b.receiver, b.action, b.actor ); + } + + // receiver action actor + bool match( const name& rr, const name& an, const name& ar ) const { + return (receiver.value == 0 || receiver == rr) && + (action.value == 0 || action == an) && + (actor.value == 0 || actor == ar); } }; @@ -214,32 +219,41 @@ const std::string mongo_db_plugin_impl::account_controls_col = "account_controls bool mongo_db_plugin_impl::filter_include( const chain::action_trace& action_trace ) const { bool include = false; - if( filter_on_star || - filter_on.find( {action_trace.receipt.receiver, 0, 0} ) != filter_on.end() || - filter_on.find( {action_trace.receipt.receiver, 
action_trace.act.name, 0} ) != filter_on.end() ) { + if( filter_on_star ) { include = true; } else { - for( const auto& a : action_trace.act.authorization ) { - if( filter_on.find( {action_trace.receipt.receiver, 0, a.actor} ) != filter_on.end() || - filter_on.find( {action_trace.receipt.receiver, action_trace.act.name, a.actor} ) != filter_on.end() ) { - include = true; - break; + auto itr = std::find_if( filter_on.cbegin(), filter_on.cend(), [&action_trace]( const auto& filter ) { + return filter.match( action_trace.receipt.receiver, action_trace.act.name, 0 ); + } ); + if( itr != filter_on.cend() ) { + include = true; + } else { + for( const auto& a : action_trace.act.authorization ) { + auto itr = std::find_if( filter_on.cbegin(), filter_on.cend(), [&action_trace, &a]( const auto& filter ) { + return filter.match( action_trace.receipt.receiver, action_trace.act.name, a.actor ); + } ); + if( itr != filter_on.cend() ) { + include = true; + break; + } } } } if( !include ) { return false; } - if( filter_out.find( {action_trace.receipt.receiver, 0, 0} ) != filter_out.end() || - filter_out.find( {action_trace.receipt.receiver, action_trace.act.name, 0} ) != filter_out.end() ) { - return false; - } + auto itr = std::find_if( filter_out.cbegin(), filter_out.cend(), [&action_trace]( const auto& filter ) { + return filter.match( action_trace.receipt.receiver, action_trace.act.name, 0 ); + } ); + if( itr != filter_out.cend() ) { return false; } + for( const auto& a : action_trace.act.authorization ) { - if( filter_out.find( {action_trace.receipt.receiver, 0, a.actor} ) != filter_out.end() || - filter_out.find( {action_trace.receipt.receiver, action_trace.act.name, a.actor} ) != filter_out.end() ) { - return false; - } + auto itr = std::find_if( filter_out.cbegin(), filter_out.cend(), [&action_trace, &a]( const auto& filter ) { + return filter.match( action_trace.receipt.receiver, action_trace.act.name, a.actor ); + } ); + if( itr != filter_out.cend() ) { return false; } } 
+ return true; } @@ -1489,8 +1503,6 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options) boost::split( v, s, boost::is_any_of( ":" )); EOS_ASSERT( v.size() == 3, fc::invalid_arg_exception, "Invalid value ${s} for --mongodb-filter-on", ("s", s)); filter_entry fe{v[0], v[1], v[2]}; - EOS_ASSERT( fe.receiver.value, fc::invalid_arg_exception, - "Invalid value ${s} for --mongodb-filter-on", ("s", s)); my->filter_on.insert( fe ); } } else { @@ -1503,8 +1515,6 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options) boost::split( v, s, boost::is_any_of( ":" )); EOS_ASSERT( v.size() == 3, fc::invalid_arg_exception, "Invalid value ${s} for --mongodb-filter-out", ("s", s)); filter_entry fe{v[0], v[1], v[2]}; - EOS_ASSERT( fe.receiver.value, fc::invalid_arg_exception, - "Invalid value ${s} for --mongodb-filter-out", ("s", s)); my->filter_out.insert( fe ); } } From 6b15868ddb0d6cb9f010797731ea9f631c571217 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 17 Sep 2018 13:47:53 -0500 Subject: [PATCH 172/194] Updated help text --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index e335eb0f7a0..f0e27401dfe 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -1438,9 +1438,9 @@ void mongo_db_plugin::set_program_options(options_description& cli, options_desc ("mongodb-store-action-traces", bpo::value()->default_value(true), "Enables storing action traces in mongodb.") ("mongodb-filter-on", bpo::value>()->composing(), - "Mongodb: Track actions which match receiver:action:actor. Action and/or Actor may be blank to include all. Action and Actor both blank allows all from Receiver. Receiver may not be blank.") + "Track actions which match receiver:action:actor. Receiver, Action, & Actor may be blank to include all. i.e. 
eosio:: or :transfer: Use * or leave unspecified to include all.") ("mongodb-filter-out", bpo::value>()->composing(), - "Mongodb: Do not track actions which match receiver:action:actor. Action and Actor both blank excludes all from Receiver. Actor blank excludes all from receiver:action. Receiver may not be blank.") + "Do not track actions which match receiver:action:actor. Receiver, Action, & Actor may be blank to exclude all.") ; } From 03c0c27cd788c10e681f424077bcbeda1666b201 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 17 Sep 2018 13:41:53 -0500 Subject: [PATCH 173/194] More diagnostic output. GH #5674 --- tests/TestHelper.py | 7 ++++--- tests/testUtils.py | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/TestHelper.py b/tests/TestHelper.py index 4cec2b02a03..6e00645e9dc 100644 --- a/tests/TestHelper.py +++ b/tests/TestHelper.py @@ -144,10 +144,11 @@ def shutdown(cluster, walletMgr, testSuccessful=True, killEosInstances=True, kil Utils.Print("== Errors see above ==") if len(Utils.CheckOutputDeque)>0: Utils.Print("== cout/cerr pairs from last %d calls to Utils. ==" % len(Utils.CheckOutputDeque)) - for out, err in Utils.CheckOutputDeque: + for out, err, cmd in reversed(Utils.CheckOutputDeque): + Utils.Print("cmd={%s}" % (" ".join(cmd))) Utils.Print("cout={%s}" % (out)) - Utils.Print("cerr={%s}\n" % (out)) - Utils.Print("== cout/cerr pairs done. ==") + Utils.Print("cerr={%s}\n" % (err)) + Utils.Print("== cmd/cout/cerr pairs done. 
==") if killEosInstances: Utils.Print("Shut down the cluster.") diff --git a/tests/testUtils.py b/tests/testUtils.py index dbc8927d485..d2a69231513 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -79,7 +79,7 @@ def checkOutput(cmd): assert(isinstance(cmd, list)) popen=subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (output,error)=popen.communicate() - Utils.CheckOutputDeque.append((output,error)) + Utils.CheckOutputDeque.append((output,error,cmd)) if popen.returncode != 0: raise subprocess.CalledProcessError(returncode=popen.returncode, cmd=cmd, output=error) return output.decode("utf-8") From dc69e4c58d7a8b6049460c3ec61ab1f8f5f560d9 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 17 Sep 2018 14:47:13 -0500 Subject: [PATCH 174/194] Fixed reporting block walker output. GH #5674 --- tests/Node.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index a6d8c5005d2..2795ba63c5e 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -925,6 +925,10 @@ def processCleosCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, ex assert(isinstance(returnType, ReturnType)) cmd="%s %s %s" % (Utils.EosClientPath, self.eosClientArgs(), cmd) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) + if exitMsg is not None: + exitMsg="Context: " + exitMsg + else: + exitMsg="" trans=None try: if returnType==ReturnType.json: @@ -936,7 +940,7 @@ def processCleosCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, ex except subprocess.CalledProcessError as ex: if not silentErrors: msg=ex.output.decode("utf-8") - errorMsg="Exception during \"%s\". %s" % (cmdDesc, msg) + errorMsg="Exception during \"%s\". Exception message: %s. 
%s" % (cmdDesc, msg, exitMsg) if exitOnError: Utils.cmdError(errorMsg) Utils.errorExit(errorMsg) @@ -944,10 +948,6 @@ def processCleosCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, ex Utils.Print("ERROR: %s" % (errorMsg)) return None - if exitMsg is not None: - exitMsg=": " + exitMsg - else: - exitMsg="" if exitOnError and trans is None: Utils.cmdError("could not \"%s\" - %s" % (cmdDesc,exitMsg)) errorExit("Failed to \"%s\"" % (cmdDesc)) From b6f0802bbc2c33b0a02730ce7b3df0237d28fd68 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 17 Sep 2018 16:44:32 -0400 Subject: [PATCH 175/194] cleos changes: fix potential bug in `to_asset`; minor changes to `create_open` --- programs/cleos/main.cpp | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 771126fc9e1..7f3d590793b 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -217,7 +217,7 @@ void add_standard_transaction_options(CLI::App* cmd, string default_permission = cmd->add_option("--max-cpu-usage-ms", tx_max_cpu_usage, localized("set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit)")); cmd->add_option("--max-net-usage", tx_max_net_usage, localized("set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit)")); - + cmd->add_option("--delay-sec", delaysec, localized("set the delay_sec seconds, defaults to 0s")); } @@ -560,13 +560,12 @@ fc::variant regproducer_variant(const account_name& producer, const public_key_t ; } -chain::action create_open(const string& contract, const name& owner, asset amount, const name& ram_payer) { +chain::action create_open(const string& contract, const name& owner, symbol sym, const name& ram_payer) { auto open_ = fc::mutable_variant_object ("owner", owner) - ("symbol", amount.get_symbol()) + ("symbol", sym) ("ram_payer", 
ram_payer); - - return action { + return action { tx_permission.empty() ? vector{{ram_payer,config::active_name}} : get_account_permissions(tx_permission), contract, "open", variant_to_bin( contract, N(open), open_ ) }; @@ -646,11 +645,11 @@ authority parse_json_authority_or_key(const std::string& authorityJsonOrFile) { } } -asset to_asset( const string& code, const string& s ) { - static map cache; +asset to_asset( account_name code, const string& s ) { + static map< pair, eosio::chain::symbol> cache; auto a = asset::from_string( s ); eosio::chain::symbol_code sym = a.get_symbol().to_symbol_code(); - auto it = cache.find( sym ); + auto it = cache.find( make_pair(code, sym) ); auto sym_str = a.symbol_name(); if ( it == cache.end() ) { auto json = call(get_currency_stats_func, fc::mutable_variant_object("json", false) @@ -661,7 +660,7 @@ asset to_asset( const string& code, const string& s ) { auto obj_it = obj.find( sym_str ); if (obj_it != obj.end()) { auto result = obj_it->value().as(); - auto p = cache.insert(make_pair( sym, result.max_supply.get_symbol() )); + auto p = cache.emplace( make_pair( code, sym ), result.max_supply.get_symbol() ); it = p.first; } else { EOS_THROW(symbol_type_exception, "Symbol ${s} is not supported by token contract ${c}", ("s", sym_str)("c", code)); @@ -679,7 +678,7 @@ asset to_asset( const string& code, const string& s ) { } inline asset to_asset( const string& s ) { - return to_asset( "eosio.token", s ); + return to_asset( N(eosio.token), s ); } struct set_account_permission_subcommand { @@ -1368,7 +1367,7 @@ struct buyram_subcommand { ("payer", from_str) ("receiver", receiver_str) ("bytes", fc::to_uint64(amount) * 1024ull); - send_actions({create_action({permission_level{from_str,config::active_name}}, config::system_account_name, N(buyrambytes), act_payload)}); + send_actions({create_action({permission_level{from_str,config::active_name}}, config::system_account_name, N(buyrambytes), act_payload)}); } else { fc::variant 
act_payload = fc::mutable_variant_object() ("payer", from_str) @@ -2235,7 +2234,7 @@ int main( int argc, char** argv ) { auto getSchedule = get_schedule_subcommand{get}; auto getTransactionId = get_transaction_id_subcommand{get}; - + /* auto getTransactions = get->add_subcommand("transactions", localized("Retrieve all transactions with specific account name referenced in their scope"), false); getTransactions->add_option("account_name", account_name, localized("name of account to query on"))->required(); @@ -2436,11 +2435,12 @@ int main( int argc, char** argv ) { tx_force_unique = false; } - auto transfer = create_transfer(con, sender, recipient, to_asset(con, amount), memo); + auto transfer_amount = to_asset(con, amount); + auto transfer = create_transfer(con, sender, recipient, transfer_amount, memo); if (!pay_ram) { send_actions( { transfer }); } else { - auto open_ = create_open(con, recipient, to_asset(con, amount), sender); + auto open_ = create_open(con, recipient, transfer_amount.get_symbol(), sender); send_actions( { open_, transfer } ); } }); From f2f8565a858b83994974dbc1c95c1cc0f7d243cb Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 17 Sep 2018 16:50:43 -0400 Subject: [PATCH 176/194] added wabt and new path for secp256k1 --- CMakeModules/EosioTester.cmake.in | 3 +++ CMakeModules/EosioTesterBuild.cmake.in | 3 +++ libraries/fc | 2 +- libraries/wabt | 2 +- 4 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index 48dcddffa70..615fb078e88 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -50,6 +50,7 @@ endif() find_library(libbinaryen binaryen @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libwasm WASM @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libwast WAST @CMAKE_INSTALL_FULL_LIBDIR@) +find_library(libwabt wabt @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libir IR @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libplatform Platform 
@CMAKE_INSTALL_FULL_LIBDIR@) find_library(liblogging Logging @CMAKE_INSTALL_FULL_LIBDIR@) @@ -59,6 +60,7 @@ find_library(liboscrypto crypto @OPENSSL_ROOT_DIR@/lib) find_library(libosssl ssl @OPENSSL_ROOT_DIR@/lib) find_library(libchainbase chainbase @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libbuiltins builtins @CMAKE_INSTALL_FULL_LIBDIR@) +find_library(libsecp256k1 secp256k1 @CMAKE_INSTALL_FULL_LIBDIR@) macro(add_eosio_test test_name) add_executable( ${test_name} ${ARGN} ) @@ -70,6 +72,7 @@ macro(add_eosio_test test_name) ${libbinaryen} ${libwast} ${libwasm} + ${libwabt} ${libruntime} ${libplatform} ${libir} diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 06451ff1650..d7d69e1f9b0 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -51,6 +51,7 @@ find_library(libbinaryen binaryen @CMAKE_BINARY_DIR@/externals/binaryen/lib) find_library(libwasm WASM @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/WASM) find_library(libwast WAST @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/WAST) find_library(libir IR @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/IR) +find_library(libwabt wabt @CMAKE_BINARY_DIR@/libraries/wabt) find_library(libplatform Platform @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/Platform) find_library(liblogging Logging @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/Logging) find_library(libruntime Runtime @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/Runtime) @@ -59,6 +60,7 @@ find_library(liboscrypto crypto @OPENSSL_ROOT_DIR@/lib) find_library(libosssl ssl @OPENSSL_ROOT_DIR@/lib) find_library(libchainbase chainbase @CMAKE_BINARY_DIR@/libraries/chainbase) find_library(libbuiltins builtins @CMAKE_BINARY_DIR@/libraries/builtins) +find_library(libsecp256k1 secp256k1 @CMAKE_BINARY_DIR@/libraries/fc/secp256k1) macro(add_eosio_test test_name) add_executable( ${test_name} ${ARGN} ) @@ -70,6 +72,7 @@ macro(add_eosio_test test_name) ${libbinaryen} ${libwast} ${libwasm} + 
${libwabt} ${libruntime} ${libplatform} ${libir} diff --git a/libraries/fc b/libraries/fc index 8edb92dd231..b3816115eb5 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 8edb92dd2310108b8eb66d010b84ca4fc9dce898 +Subproject commit b3816115eb5452dbce1c62c0cb7c2d98233ae0f2 diff --git a/libraries/wabt b/libraries/wabt index 67381cbe17e..347e8d2bc8e 160000 --- a/libraries/wabt +++ b/libraries/wabt @@ -1 +1 @@ -Subproject commit 67381cbe17e0ef87d40f3376e99aea7fff0fa0b1 +Subproject commit 347e8d2bc8e30c1466d46654b84537440b6cd5a3 From a6f589c338d80d076bcb3f0429d2e5c3f10d105a Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 17 Sep 2018 16:58:35 -0400 Subject: [PATCH 177/194] no need to build wabt executables --- libraries/wabt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/wabt b/libraries/wabt index 347e8d2bc8e..e3739c3a7e8 160000 --- a/libraries/wabt +++ b/libraries/wabt @@ -1 +1 @@ -Subproject commit 347e8d2bc8e30c1466d46654b84537440b6cd5a3 +Subproject commit e3739c3a7e8049020580500ffe1dc9ff809fe0e8 From db9ec99d474e226cabc69c16678d7365461e7527 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 17 Sep 2018 17:06:06 -0400 Subject: [PATCH 178/194] removed installation of man pages for wabt tools --- libraries/wabt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/wabt b/libraries/wabt index e3739c3a7e8..34253f70a34 160000 --- a/libraries/wabt +++ b/libraries/wabt @@ -1 +1 @@ -Subproject commit e3739c3a7e8049020580500ffe1dc9ff809fe0e8 +Subproject commit 34253f70a34e906a2a817def723e2dd1380b8b89 From c7505690370701e067ecf3e5e14f9d0999f707f6 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 17 Sep 2018 18:24:44 -0400 Subject: [PATCH 179/194] fix core_symbol issues with get account; also print out account creation time during cleos get account --- plugins/chain_plugin/chain_plugin.cpp | 46 ++++++++++++++++++- .../eosio/chain_plugin/chain_plugin.hpp | 2 + programs/cleos/main.cpp | 13 
++++-- 3 files changed, 56 insertions(+), 5 deletions(-) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index c0501b9c6da..183e7a48ecc 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1644,16 +1644,18 @@ read_only::get_account_results read_only::get_account( const get_account_params& const auto token_code = N(eosio.token); + auto core_symbol = extract_core_symbol(); + const auto* t_id = d.find(boost::make_tuple( token_code, params.account_name, N(accounts) )); if( t_id != nullptr ) { const auto &idx = d.get_index(); - auto it = idx.find(boost::make_tuple( t_id->id, symbol().to_symbol_code() )); + auto it = idx.find(boost::make_tuple( t_id->id, core_symbol.to_symbol_code() )); if( it != idx.end() && it->value.size() >= sizeof(asset) ) { asset bal; fc::datastream ds(it->value.data(), it->value.size()); fc::raw::unpack(ds, bal); - if( bal.get_symbol().valid() && bal.get_symbol() == symbol() ) { + if( bal.get_symbol().valid() && bal.get_symbol() == core_symbol ) { result.core_liquid_balance = bal; } } @@ -1766,6 +1768,46 @@ read_only::get_transaction_id_result read_only::get_transaction_id( const read_o return params.id(); } +namespace detail { + struct ram_market_exchange_state_t { + asset ignore1; + asset ignore2; + double ignore3; + asset core_symbol; + double ignore4; + }; +} + +chain::symbol read_only::extract_core_symbol()const { + symbol core_symbol; // Default to CORE_SYMBOL if the appropriate data structure cannot be found in the system contract table data + + // The following code makes assumptions about the contract deployed on eosio account (i.e. the system contract) and how it stores its data. 
+ const auto& d = db.db(); + const auto* t_id = d.find(boost::make_tuple( N(eosio), N(eosio), N(rammarket) )); + if( t_id != nullptr ) { + const auto &idx = d.get_index(); + auto it = idx.find(boost::make_tuple( t_id->id, eosio::chain::string_to_symbol_c(4,"RAMCORE") )); + if( it != idx.end() ) { + detail::ram_market_exchange_state_t ram_market_exchange_state; + + fc::datastream ds( it->value.data(), it->value.size() ); + + try { + fc::raw::unpack(ds, ram_market_exchange_state); + } catch( ... ) { + return core_symbol; + } + + if( ram_market_exchange_state.core_symbol.get_symbol().valid() ) { + core_symbol = ram_market_exchange_state.core_symbol.get_symbol(); + } + } + } + + return core_symbol; +} } // namespace chain_apis } // namespace eosio + +FC_REFLECT( eosio::chain_apis::detail::ram_market_exchange_state_t, (ignore1)(ignore2)(ignore3)(core_symbol)(ignore4) ) diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index d4f4d49ea6e..5a1f7fc3ff8 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -512,6 +512,8 @@ class read_only { return result; } + chain::symbol extract_core_symbol()const; + friend struct resolver_factory; }; diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 771126fc9e1..9e1f5bf1376 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -217,7 +217,7 @@ void add_standard_transaction_options(CLI::App* cmd, string default_permission = cmd->add_option("--max-cpu-usage-ms", tx_max_cpu_usage, localized("set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit)")); cmd->add_option("--max-net-usage", tx_max_net_usage, localized("set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit)")); - + 
cmd->add_option("--delay-sec", delaysec, localized("set the delay_sec seconds, defaults to 0s")); } @@ -1368,7 +1368,7 @@ struct buyram_subcommand { ("payer", from_str) ("receiver", receiver_str) ("bytes", fc::to_uint64(amount) * 1024ull); - send_actions({create_action({permission_level{from_str,config::active_name}}, config::system_account_name, N(buyrambytes), act_payload)}); + send_actions({create_action({permission_level{from_str,config::active_name}}, config::system_account_name, N(buyrambytes), act_payload)}); } else { fc::variant act_payload = fc::mutable_variant_object() ("payer", from_str) @@ -1480,6 +1480,13 @@ void get_account( const string& accountName, bool json_format ) { asset staked; asset unstaking; + if( res.core_liquid_balance.valid() ) { + unstaking = asset( 0, res.core_liquid_balance->get_symbol() ); // Correct core symbol for unstaking asset. + staked = asset( 0, res.core_liquid_balance->get_symbol() ); // Correct core symbol for staked asset. + } + + std::cout << "created: " << string(res.created) << std::endl; + if(res.privileged) std::cout << "privileged: true" << std::endl; constexpr size_t indent_size = 5; @@ -2235,7 +2242,7 @@ int main( int argc, char** argv ) { auto getSchedule = get_schedule_subcommand{get}; auto getTransactionId = get_transaction_id_subcommand{get}; - + /* auto getTransactions = get->add_subcommand("transactions", localized("Retrieve all transactions with specific account name referenced in their scope"), false); getTransactions->add_option("account_name", account_name, localized("name of account to query on"))->required(); From 4dd970a7066ba29131cd58635f2bc2afd1d7a6ef Mon Sep 17 00:00:00 2001 From: Greg Lee Date: Tue, 18 Sep 2018 05:11:41 -0400 Subject: [PATCH 180/194] Create community plugin list --- plugins/COMMUNITY.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 plugins/COMMUNITY.md diff --git a/plugins/COMMUNITY.md b/plugins/COMMUNITY.md new file mode 100644 index 
00000000000..d7baf7e4a35 --- /dev/null +++ b/plugins/COMMUNITY.md @@ -0,0 +1,14 @@ +# Community Plugin List + +This file contains a list of community authored plugins for `nodeos`, acting as a directory of the plugins that are available. + +Third parties are encouraged to make pull requests to this file (`develop` branch please) in order to list new plugins. + +| Description | URL | +| ----------- | --- | +| Watch for specific actions and send them to an HTTP URL | https://github.com/eosauthority/eosio-watcher-plugin | +| Kafka | https://github.com/TP-Lab/kafka_plugin | + +## DISCLAIMER: + +The fact that a plugin is listed in this file does not mean the plugin has been reviewed by this repository's maintainers. No warranties are made, i.e. you are at your own risk if you choose to use them. From 1fa882a98c3fa2b318fb0ec05ddce329cb097e0c Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Tue, 18 Sep 2018 11:12:43 -0400 Subject: [PATCH 181/194] added GMP for libtester --- CMakeModules/EosioTester.cmake.in | 7 +++++++ CMakeModules/EosioTesterBuild.cmake.in | 7 +++++++ libraries/CMakeLists.txt | 1 + libraries/wabt | 2 +- 4 files changed, 16 insertions(+), 1 deletion(-) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index 615fb078e88..d2ca3afdf41 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -61,6 +61,12 @@ find_library(libosssl ssl @OPENSSL_ROOT_DIR@/lib) find_library(libchainbase chainbase @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libbuiltins builtins @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libsecp256k1 secp256k1 @CMAKE_INSTALL_FULL_LIBDIR@) +find_library(GMP_LIBRARIES NAMES libgmp.a gmp.lib gmp libgmp-10 mpir + HINTS ENV GMP_LIB_DIR + ENV GMP_DIR + PATH_SUFFIXES lib + DOC "Path to the GMP library" +) macro(add_eosio_test test_name) add_executable( ${test_name} ${ARGN} ) @@ -82,6 +88,7 @@ macro(add_eosio_test test_name) ${liblogging} ${libchainbase} ${libbuiltins} + ${GMP_LIBRARIES} 
${libsecp256k1} LLVMX86Disassembler diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index d7d69e1f9b0..fecd6c081ca 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -61,6 +61,12 @@ find_library(libosssl ssl @OPENSSL_ROOT_DIR@/lib) find_library(libchainbase chainbase @CMAKE_BINARY_DIR@/libraries/chainbase) find_library(libbuiltins builtins @CMAKE_BINARY_DIR@/libraries/builtins) find_library(libsecp256k1 secp256k1 @CMAKE_BINARY_DIR@/libraries/fc/secp256k1) +find_library(GMP_LIBRARIES NAMES libgmp.a gmp.lib gmp libgmp-10 mpir + HINTS ENV GMP_LIB_DIR + ENV GMP_DIR + PATH_SUFFIXES lib + DOC "Path to the GMP library" +) macro(add_eosio_test test_name) add_executable( ${test_name} ${ARGN} ) @@ -82,6 +88,7 @@ macro(add_eosio_test test_name) ${liblogging} ${libchainbase} ${libbuiltins} + ${GMP_LIBRARIES} ${libsecp256k1} LLVMX86Disassembler diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index b67c86b0ed8..78593706c7d 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -11,5 +11,6 @@ add_subdirectory( abi_generator ) #turn these off for now set(BUILD_TESTS OFF CACHE BOOL "Build GTest-based tests") +set(BUILD_TOOLS OFF CACHE BOOL "Build wabt tools") set(RUN_RE2C OFF CACHE BOOL "Run re2c") add_subdirectory( wabt ) diff --git a/libraries/wabt b/libraries/wabt index 34253f70a34..270e1f9ee63 160000 --- a/libraries/wabt +++ b/libraries/wabt @@ -1 +1 @@ -Subproject commit 34253f70a34e906a2a817def723e2dd1380b8b89 +Subproject commit 270e1f9ee63004578c82a2fe594b81afd84c1631 From ae8edf228f3e23b558cd4f3f780eb61ee1b5da9d Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 18 Sep 2018 12:03:34 -0400 Subject: [PATCH 182/194] fix history_plugin get_transaction bug #5589 --- libraries/fc | 2 +- plugins/history_plugin/history_plugin.cpp | 49 ++++++++++++------- .../eosio/history_plugin/history_plugin.hpp | 20 ++------ programs/cleos/main.cpp | 7 +-- 4 files changed, 
38 insertions(+), 40 deletions(-) diff --git a/libraries/fc b/libraries/fc index 8edb92dd231..bb92231736e 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 8edb92dd2310108b8eb66d010b84ca4fc9dce898 +Subproject commit bb92231736e6b1b504b5e8dc3ae33ea814a263d6 diff --git a/plugins/history_plugin/history_plugin.cpp b/plugins/history_plugin/history_plugin.cpp index 6888f819b2a..808bcbde336 100644 --- a/plugins/history_plugin/history_plugin.cpp +++ b/plugins/history_plugin/history_plugin.cpp @@ -10,7 +10,7 @@ #include #include -namespace eosio { +namespace eosio { using namespace chain; using boost::signals2::scoped_connection; @@ -51,7 +51,7 @@ namespace eosio { indexed_by< ordered_unique, member>, ordered_unique, member>, - ordered_unique, + ordered_unique, composite_key< action_history_object, member, member @@ -64,7 +64,7 @@ namespace eosio { account_history_object, indexed_by< ordered_unique, member>, - ordered_unique, + ordered_unique, composite_key< account_history_object, member, member @@ -213,7 +213,7 @@ namespace eosio { uint64_t asn = 0; if( itr != idx.begin() ) --itr; - if( itr->account == n ) + if( itr->account == n ) asn = itr->account_sequence_num + 1; //idump((n)(act.receipt.global_sequence)(asn)); @@ -268,7 +268,7 @@ namespace eosio { aho.block_time = chain.pending_block_time(); aho.trx_id = at.trx_id; }); - + auto aset = account_set( at ); for( auto a : aset ) { record_account_action( a, at ); @@ -366,7 +366,7 @@ namespace eosio { - namespace history_apis { + namespace history_apis { read_only::get_actions_result read_only::get_actions( const read_only::get_actions_params& params )const { edump((params)); auto& chain = history->chain_plug->chain(); @@ -388,7 +388,7 @@ namespace eosio { pos = itr->account_sequence_num+1; } else if( itr != idx.begin() ) --itr; - if( itr->account == n ) + if( itr->account == n ) pos = itr->account_sequence_num + 1; } @@ -440,13 +440,31 @@ namespace eosio { read_only::get_transaction_result 
read_only::get_transaction( const read_only::get_transaction_params& p )const { auto& chain = history->chain_plug->chain(); const auto abi_serializer_max_time = history->chain_plug->get_abi_serializer_max_time(); - auto short_id = fc::variant(p.id).as_string().substr(0,8); + + transaction_id_type input_id; + auto input_id_length = p.id.size(); + try { + FC_ASSERT( input_id_length <= 64, "hex string is too long to represent an actual transaction id" ); + FC_ASSERT( input_id_length >= 8, "hex string representing transaction id should be at least 8 characters long to avoid excessive collisions" ); + input_id = transaction_id_type(p.id); + } EOS_RETHROW_EXCEPTIONS(transaction_id_type_exception, "Invalid transaction ID: ${transaction_id}", ("transaction_id", p.id)) + + auto txn_id_matched = [&input_id, input_id_size = input_id_length/2, no_half_byte_at_end = (input_id_length % 2 == 0)] + ( const transaction_id_type &id ) -> bool // hex prefix comparison + { + bool whole_byte_prefix_matches = memcmp( input_id.data(), id.data(), input_id_size ) == 0; + if( !whole_byte_prefix_matches || no_half_byte_at_end ) + return whole_byte_prefix_matches; + + // check if half byte at end of specified part of input_id matches + return (*(input_id.data() + input_id_size) & 0xF0) == (*(id.data() + input_id_size) & 0xF0); + }; const auto& db = chain.db(); const auto& idx = db.get_index(); - auto itr = idx.lower_bound( boost::make_tuple(p.id) ); + auto itr = idx.lower_bound( boost::make_tuple( input_id ) ); - bool in_history = (itr != idx.end() && fc::variant(itr->trx_id).as_string().substr(0,8) == short_id ); + bool in_history = (itr != idx.end() && txn_id_matched(itr->trx_id) ); if( !in_history && !p.block_num_hint ) { EOS_THROW(tx_not_found, "Transaction ${id} not found in history and no block hint was given", ("id",p.id)); @@ -454,12 +472,9 @@ namespace eosio { get_transaction_result result; - if (in_history) { - result.id = p.id; - result.last_irreversible_block = 
chain.last_irreversible_block_num(); - - + if( in_history ) { result.id = itr->trx_id; + result.last_irreversible_block = chain.last_irreversible_block_num(); result.block_num = itr->block_num; result.block_time = itr->block_time; @@ -509,7 +524,7 @@ namespace eosio { if (receipt.trx.contains()) { auto& pt = receipt.trx.get(); auto mtrx = transaction_metadata(pt); - if (fc::variant(mtrx.id).as_string().substr(0, 8) == short_id) { + if( txn_id_matched(mtrx.id) ) { result.id = mtrx.id; result.last_irreversible_block = chain.last_irreversible_block_num(); result.block_num = *p.block_num_hint; @@ -522,7 +537,7 @@ namespace eosio { } } else { auto& id = receipt.trx.get(); - if (fc::variant(id).as_string().substr(0, 8) == short_id) { + if( txn_id_matched(id) ) { result.id = id; result.last_irreversible_block = chain.last_irreversible_block_num(); result.block_num = *p.block_num_hint; diff --git a/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp b/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp index 402c0e3966d..b6801b30a29 100644 --- a/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp +++ b/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp @@ -30,18 +30,6 @@ class read_only { : history(history) {} - /* - struct get_transaction_params { - chain::transaction_id_type transaction_id; - }; - struct get_transaction_results { - chain::transaction_id_type transaction_id; - fc::variant transaction; - }; - get_transaction_results get_transaction(const get_transaction_params& params) const; - */ - - struct get_actions_params { chain::account_name account_name; optional pos; /// a absolute sequence positon -1 is the end/last action @@ -67,7 +55,7 @@ class read_only { struct get_transaction_params { - transaction_id_type id; + string id; optional block_num_hint; }; @@ -81,7 +69,7 @@ class read_only { }; get_transaction_result get_transaction( const get_transaction_params& )const; - + @@ -120,13 
+108,13 @@ class read_only { /** * This plugin tracks all actions and keys associated with a set of configured accounts. It enables - * wallets to paginate queries for history. + * wallets to paginate queries for history. * * An action will be included in the account's history if any of the following: * - receiver * - any account named in auth list * - * A key will be linked to an account if the key is referneced in authorities of updateauth or newaccount + * A key will be linked to an account if the key is referenced in authorities of updateauth or newaccount */ class history_plugin : public plugin { public: diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 49b38d13a37..b4acd6faf72 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -2133,12 +2133,7 @@ int main( int argc, char** argv ) { getTransaction->add_option("id", transaction_id_str, localized("ID of the transaction to retrieve"))->required(); getTransaction->add_option( "-b,--block-hint", block_num_hint, localized("the block number this transaction may be in") ); getTransaction->set_callback([&] { - transaction_id_type transaction_id; - try { - while( transaction_id_str.size() < 64 ) transaction_id_str += "0"; - transaction_id = transaction_id_type(transaction_id_str); - } EOS_RETHROW_EXCEPTIONS(transaction_id_type_exception, "Invalid transaction ID: ${transaction_id}", ("transaction_id", transaction_id_str)) - auto arg= fc::mutable_variant_object( "id", transaction_id); + auto arg= fc::mutable_variant_object( "id", transaction_id_str); if ( block_num_hint > 0 ) { arg = arg("block_num_hint", block_num_hint); } From 9adf8bd0d5a56c82559a140435cc7ba8c83bc207 Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 18 Sep 2018 13:12:43 -0400 Subject: [PATCH 183/194] update fc submodule --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index bb92231736e..4dc8375d7d3 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ 
-Subproject commit bb92231736e6b1b504b5e8dc3ae33ea814a263d6 +Subproject commit 4dc8375d7d3e02ab1177ab5c22835f75b45c845a From 79de7765333f24af59d78ad7f2c85a5b8d0d562b Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Tue, 18 Sep 2018 09:53:01 -0400 Subject: [PATCH 184/194] add log messages when a transaction is accepted/rejected/pending that indicates producer/speculator --- plugins/producer_plugin/producer_plugin.cpp | 43 +++++++++++++++++++-- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index b25bafd1b9c..7286733432d 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -49,6 +49,9 @@ namespace fc { const fc::string logger_name("producer_plugin"); fc::logger _log; +const fc::string trx_trace_logger_name("transaction_tracing"); +fc::logger _trx_trace_log; + namespace eosio { static appbase::abstract_plugin& _producer_plugin = app().register_plugin(); @@ -341,12 +344,32 @@ class producer_plugin_impl : public std::enable_shared_from_thisheader.timestamp.to_time_point(); - auto send_response = [this, &trx, &next](const fc::static_variant& response) { + auto send_response = [this, &trx, &chain, &next](const fc::static_variant& response) { next(response); if (response.contains()) { _transaction_ack_channel.publish(std::pair(response.get(), trx)); + if (_pending_block_mode == pending_block_mode::producing) { + fc_ilog(_trx_trace_log, "[TRX_TRACE] Block ${block num} for producer ${prod} is REJECTING tx: ${txid} : ${why} ", + ("block_num", chain.head_block_num() + 1) + ("prod", chain.pending_block_state()->header.producer) + ("txid", trx->id()) + ("why",response.get()->what())); + } else { + fc_ilog(_trx_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${txid} : ${why} ", + ("txid", trx->id()) + ("why",response.get()->what())); + } } else { _transaction_ack_channel.publish(std::pair(nullptr, trx)); + 
if (_pending_block_mode == pending_block_mode::producing) { + fc_ilog(_trx_trace_log, "[TRX_TRACE] Block ${block num} for producer ${prod} is ACCEPTING tx: ${txid}", + ("block_num", chain.head_block_num() + 1) + ("prod", chain.pending_block_state()->header.producer) + ("txid", trx->id())); + } else { + fc_ilog(_trx_trace_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${txid}", + ("txid", trx->id())); + } } }; @@ -373,6 +396,15 @@ class producer_plugin_impl : public std::enable_shared_from_thisexcept) { if (failure_is_subjective(*trace->except, deadline_is_subjective)) { _pending_incoming_transactions.emplace_back(trx, persist_until_expired, next); + if (_pending_block_mode == pending_block_mode::producing) { + fc_ilog(_trx_trace_log, "[TRX_TRACE] Block ${block num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING ", + ("block_num", chain.head_block_num() + 1) + ("prod", chain.pending_block_state()->header.producer) + ("txid", trx->id())); + } else { + fc_ilog(_trx_trace_log, "[TRX_TRACE] Speculative execution COULD NOT FIT tx: ${txid} RETRYING", + ("txid", trx->id())); + } } else { auto e_ptr = trace->except->dynamic_copy_exception(); send_response(e_ptr); @@ -648,8 +680,13 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ void producer_plugin::plugin_startup() { try { - if(fc::get_logger_map().find(logger_name) != fc::get_logger_map().end()) { - _log = fc::get_logger_map()[logger_name]; + auto& logger_map = fc::get_logger_map(); + if(logger_map.find(logger_name) != logger_map.end()) { + _log = logger_map[logger_name]; + } + + if( logger_map.find(trx_trace_logger_name) != logger_map.end()) { + _trx_trace_log = logger_map[trx_trace_logger_name]; } ilog("producer plugin: plugin_startup() begin"); From 1ec58bea68676d86e7a53c4560bd462d391b37c1 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Tue, 18 Sep 2018 10:07:35 -0400 Subject: [PATCH 185/194] change to dlog --- plugins/producer_plugin/producer_plugin.cpp | 12 
++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 7286733432d..bf0d46e3813 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -349,25 +349,25 @@ class producer_plugin_impl : public std::enable_shared_from_this()) { _transaction_ack_channel.publish(std::pair(response.get(), trx)); if (_pending_block_mode == pending_block_mode::producing) { - fc_ilog(_trx_trace_log, "[TRX_TRACE] Block ${block num} for producer ${prod} is REJECTING tx: ${txid} : ${why} ", + fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block num} for producer ${prod} is REJECTING tx: ${txid} : ${why} ", ("block_num", chain.head_block_num() + 1) ("prod", chain.pending_block_state()->header.producer) ("txid", trx->id()) ("why",response.get()->what())); } else { - fc_ilog(_trx_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${txid} : ${why} ", + fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${txid} : ${why} ", ("txid", trx->id()) ("why",response.get()->what())); } } else { _transaction_ack_channel.publish(std::pair(nullptr, trx)); if (_pending_block_mode == pending_block_mode::producing) { - fc_ilog(_trx_trace_log, "[TRX_TRACE] Block ${block num} for producer ${prod} is ACCEPTING tx: ${txid}", + fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block num} for producer ${prod} is ACCEPTING tx: ${txid}", ("block_num", chain.head_block_num() + 1) ("prod", chain.pending_block_state()->header.producer) ("txid", trx->id())); } else { - fc_ilog(_trx_trace_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${txid}", + fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${txid}", ("txid", trx->id())); } } @@ -397,12 +397,12 @@ class producer_plugin_impl : public std::enable_shared_from_thisexcept, deadline_is_subjective)) { 
_pending_incoming_transactions.emplace_back(trx, persist_until_expired, next); if (_pending_block_mode == pending_block_mode::producing) { - fc_ilog(_trx_trace_log, "[TRX_TRACE] Block ${block num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING ", + fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING ", ("block_num", chain.head_block_num() + 1) ("prod", chain.pending_block_state()->header.producer) ("txid", trx->id())); } else { - fc_ilog(_trx_trace_log, "[TRX_TRACE] Speculative execution COULD NOT FIT tx: ${txid} RETRYING", + fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution COULD NOT FIT tx: ${txid} RETRYING", ("txid", trx->id())); } } else { From e9ab245afbdcf40d8f54ed1cd08609e9b1b3708a Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Tue, 18 Sep 2018 14:02:39 -0400 Subject: [PATCH 186/194] add tracing for when persisted transactions are dropped, add logging to producer plugin about various start block processing statistics. 
--- plugins/producer_plugin/producer_plugin.cpp | 207 +++++++++++++------- 1 file changed, 133 insertions(+), 74 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index bf0d46e3813..4bf0b28cdd1 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -1020,8 +1020,29 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool // remove all persisted transactions that have now expired auto& persisted_by_id = _persistent_transactions.get(); auto& persisted_by_expiry = _persistent_transactions.get(); - while(!persisted_by_expiry.empty() && persisted_by_expiry.begin()->expiry <= pbs->header.timestamp.to_time_point()) { - persisted_by_expiry.erase(persisted_by_expiry.begin()); + if (!persisted_by_expiry.empty()) { + int num_expired_persistent = 0; + int orig_count = _persistent_transactions.size(); + + while(!persisted_by_expiry.empty() && persisted_by_expiry.begin()->expiry <= pbs->header.timestamp.to_time_point()) { + auto const& txid = persisted_by_expiry.begin()->trx_id; + if (_pending_block_mode == pending_block_mode::producing) { + fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block num} for producer ${prod} is EXPIRING PERSISTED tx: ${txid}", + ("block_num", chain.head_block_num() + 1) + ("prod", chain.pending_block_state()->header.producer) + ("txid", txid)); + } else { + fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${txid}", + ("txid", txid)); + } + + persisted_by_expiry.erase(persisted_by_expiry.begin()); + num_expired_persistent++; + } + + fc_dlog(_log, "Processed ${n} persisted transactions, Expired ${expired}", + ("n", orig_count) + ("expired", num_expired_persistent)); } try { @@ -1059,33 +1080,46 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool } } - for (const auto& trx: apply_trxs) { - if (block_time <= fc::time_point::now()) exhausted = 
true; - if (exhausted) { - break; - } + if (!apply_trxs.empty()) { + int num_applied = 0; + int num_failed = 0; - try { - auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms); - bool deadline_is_subjective = false; - if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && block_time < deadline)) { - deadline_is_subjective = true; - deadline = block_time; + for (const auto& trx: apply_trxs) { + if (block_time <= fc::time_point::now()) exhausted = true; + if (exhausted) { + break; } - auto trace = chain.push_transaction(trx, deadline); - if (trace->except) { - if (failure_is_subjective(*trace->except, deadline_is_subjective)) { - exhausted = true; + try { + auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms); + bool deadline_is_subjective = false; + if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && block_time < deadline)) { + deadline_is_subjective = true; + deadline = block_time; + } + + auto trace = chain.push_transaction(trx, deadline); + if (trace->except) { + if (failure_is_subjective(*trace->except, deadline_is_subjective)) { + exhausted = true; + } else { + // this failed our configured maximum transaction time, we don't want to replay it + chain.drop_unapplied_transaction(trx); + num_failed++; + } } else { - // this failed our configured maximum transaction time, we don't want to replay it - chain.drop_unapplied_transaction(trx); + num_applied++; } - } - } catch ( const guard_exception& e ) { - app().get_plugin().handle_guard_exception(e); - return start_block_result::failed; - } FC_LOG_AND_DROP(); + } catch ( const guard_exception& e ) { + app().get_plugin().handle_guard_exception(e); + return start_block_result::failed; + } FC_LOG_AND_DROP(); + } + + fc_dlog(_log, "Processed ${n} previously applied transactions, Applied ${applied}, Failed/Dropped ${failed}", + ("n", apply_trxs.size()) + ("applied", num_applied) + 
("failed", num_failed)); } } @@ -1093,61 +1127,82 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool auto& blacklist_by_id = _blacklisted_transactions.get(); auto& blacklist_by_expiry = _blacklisted_transactions.get(); auto now = fc::time_point::now(); - while (!blacklist_by_expiry.empty() && blacklist_by_expiry.begin()->expiry <= now) { - blacklist_by_expiry.erase(blacklist_by_expiry.begin()); - } + if(!blacklist_by_expiry.empty()) { + int num_expired = 0; + int orig_count = _blacklisted_transactions.size(); - auto scheduled_trxs = chain.get_scheduled_transactions(); - - for (const auto& trx : scheduled_trxs) { - if (block_time <= fc::time_point::now()) exhausted = true; - if (exhausted) { - break; + while (!blacklist_by_expiry.empty() && blacklist_by_expiry.begin()->expiry <= now) { + blacklist_by_expiry.erase(blacklist_by_expiry.begin()); + num_expired++; } - // configurable ratio of incoming txns vs deferred txns - while (_incoming_trx_weight >= 1.0 && orig_pending_txn_size && _pending_incoming_transactions.size()) { - auto e = _pending_incoming_transactions.front(); - _pending_incoming_transactions.pop_front(); - --orig_pending_txn_size; - _incoming_trx_weight -= 1.0; - on_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)); - } + fc_dlog(_log, "Processed ${n} blacklisted transactions, Expired ${expired}", + ("n", orig_count) + ("expired", num_expired)); + } - if (block_time <= fc::time_point::now()) { - exhausted = true; - break; - } + auto scheduled_trxs = chain.get_scheduled_transactions(); + if (!scheduled_trxs.empty()) { + int num_applied = 0; + int num_failed = 0; + + for (const auto& trx : scheduled_trxs) { + if (block_time <= fc::time_point::now()) exhausted = true; + if (exhausted) { + break; + } - if (blacklist_by_id.find(trx) != blacklist_by_id.end()) { - continue; - } + // configurable ratio of incoming txns vs deferred txns + while (_incoming_trx_weight >= 1.0 && orig_pending_txn_size && 
_pending_incoming_transactions.size()) { + auto e = _pending_incoming_transactions.front(); + _pending_incoming_transactions.pop_front(); + --orig_pending_txn_size; + _incoming_trx_weight -= 1.0; + on_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)); + } - try { - auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms); - bool deadline_is_subjective = false; - if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && block_time < deadline)) { - deadline_is_subjective = true; - deadline = block_time; + if (block_time <= fc::time_point::now()) { + exhausted = true; + break; } - auto trace = chain.push_scheduled_transaction(trx, deadline); - if (trace->except) { - if (failure_is_subjective(*trace->except, deadline_is_subjective)) { - exhausted = true; + if (blacklist_by_id.find(trx) != blacklist_by_id.end()) { + continue; + } + + try { + auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms); + bool deadline_is_subjective = false; + if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && block_time < deadline)) { + deadline_is_subjective = true; + deadline = block_time; + } + + auto trace = chain.push_scheduled_transaction(trx, deadline); + if (trace->except) { + if (failure_is_subjective(*trace->except, deadline_is_subjective)) { + exhausted = true; + } else { + auto expiration = fc::time_point::now() + fc::seconds(chain.get_global_properties().configuration.deferred_trx_expiration_window); + // this failed our configured maximum transaction time, we don't want to replay it add it to a blacklist + _blacklisted_transactions.insert(transaction_id_with_expiry{trx, expiration}); + num_failed++; + } } else { - auto expiration = fc::time_point::now() + fc::seconds(chain.get_global_properties().configuration.deferred_trx_expiration_window); - // this failed our configured maximum transaction time, we don't want to replay it 
add it to a blacklist - _blacklisted_transactions.insert(transaction_id_with_expiry{trx, expiration}); + num_applied++; } - } - } catch ( const guard_exception& e ) { - app().get_plugin().handle_guard_exception(e); - return start_block_result::failed; - } FC_LOG_AND_DROP(); + } catch ( const guard_exception& e ) { + app().get_plugin().handle_guard_exception(e); + return start_block_result::failed; + } FC_LOG_AND_DROP(); + + _incoming_trx_weight += _incoming_defer_ratio; + if (!orig_pending_txn_size) _incoming_trx_weight = 0.0; + } + + fc_dlog(_log, "Processed ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}", + ("n", scheduled_trxs.size())("applied", num_applied)("failed", num_failed)); - _incoming_trx_weight += _incoming_defer_ratio; - if (!orig_pending_txn_size) _incoming_trx_weight = 0.0; } } @@ -1156,12 +1211,16 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool } else { // attempt to apply any pending incoming transactions _incoming_trx_weight = 0.0; - while (orig_pending_txn_size && _pending_incoming_transactions.size()) { - auto e = _pending_incoming_transactions.front(); - _pending_incoming_transactions.pop_front(); - --orig_pending_txn_size; - on_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)); - if (block_time <= fc::time_point::now()) return start_block_result::exhausted; + + if (!_pending_incoming_transactions.empty()) { + fc_dlog(_log, "Processing ${n} pending transactions"); + while (orig_pending_txn_size && _pending_incoming_transactions.size()) { + auto e = _pending_incoming_transactions.front(); + _pending_incoming_transactions.pop_front(); + --orig_pending_txn_size; + on_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)); + if (block_time <= fc::time_point::now()) return start_block_result::exhausted; + } } return start_block_result::succeeded; } From 36a10ff27d13f35fa80a4630644710d3af16da69 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: 
Tue, 18 Sep 2018 14:55:27 -0400 Subject: [PATCH 187/194] in cases where there are early outs indicate how many entries were actually processed and how many were potentially processed --- plugins/producer_plugin/producer_plugin.cpp | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 4bf0b28cdd1..59ff0992ac4 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -1083,6 +1083,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool if (!apply_trxs.empty()) { int num_applied = 0; int num_failed = 0; + int num_processed = 0; for (const auto& trx: apply_trxs) { if (block_time <= fc::time_point::now()) exhausted = true; @@ -1090,6 +1091,8 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool break; } + num_processed++; + try { auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms); bool deadline_is_subjective = false; @@ -1116,7 +1119,8 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool } FC_LOG_AND_DROP(); } - fc_dlog(_log, "Processed ${n} previously applied transactions, Applied ${applied}, Failed/Dropped ${failed}", + fc_dlog(_log, "Processed ${m} of ${n} previously applied transactions, Applied ${applied}, Failed/Dropped ${failed}", + ("m", num_processed) ("n", apply_trxs.size()) ("applied", num_applied) ("failed", num_failed)); @@ -1145,6 +1149,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool if (!scheduled_trxs.empty()) { int num_applied = 0; int num_failed = 0; + int num_processed = 0; for (const auto& trx : scheduled_trxs) { if (block_time <= fc::time_point::now()) exhausted = true; @@ -1152,6 +1157,8 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool break; } + num_processed++; + // configurable ratio 
of incoming txns vs deferred txns while (_incoming_trx_weight >= 1.0 && orig_pending_txn_size && _pending_incoming_transactions.size()) { auto e = _pending_incoming_transactions.front(); @@ -1200,8 +1207,11 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool if (!orig_pending_txn_size) _incoming_trx_weight = 0.0; } - fc_dlog(_log, "Processed ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}", - ("n", scheduled_trxs.size())("applied", num_applied)("failed", num_failed)); + fc_dlog(_log, "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}", + ("m", num_processed) + ("n", scheduled_trxs.size()) + ("applied", num_applied) + ("failed", num_failed)); } } From f9ddc42a6762f4a4a3f9e5bb9383307baedeafe1 Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 18 Sep 2018 14:57:20 -0400 Subject: [PATCH 188/194] update wabt submodule --- libraries/wabt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/wabt b/libraries/wabt index 270e1f9ee63..e7d6948d242 160000 --- a/libraries/wabt +++ b/libraries/wabt @@ -1 +1 @@ -Subproject commit 270e1f9ee63004578c82a2fe594b81afd84c1631 +Subproject commit e7d6948d242a9931436c38275d59a9c6036c4095 From 89ca7128bbdad0f50711221fc548e180eb43f564 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 18 Sep 2018 14:58:54 -0400 Subject: [PATCH 189/194] sync wabt to upstream as of 2018 Sep 18 This update provides a considerable performance boost --- libraries/chain/CMakeLists.txt | 2 +- libraries/chain/include/eosio/chain/webassembly/wabt.hpp | 1 - libraries/chain/webassembly/wabt.cpp | 7 ++++--- libraries/wabt | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index d23329548a4..cf4c1be184d 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -51,7 +51,7 @@ add_library( 
eosio_chain ) target_link_libraries( eosio_chain eos_utilities fc chainbase Logging IR WAST WASM Runtime - wasm asmjs passes cfg ast emscripten-optimizer support softfloat builtins libwabt + wasm asmjs passes cfg ast emscripten-optimizer support softfloat builtins wabt ) target_include_directories( eosio_chain PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" diff --git a/libraries/chain/include/eosio/chain/webassembly/wabt.hpp b/libraries/chain/include/eosio/chain/webassembly/wabt.hpp index 14b49736ac7..5be568d4b01 100644 --- a/libraries/chain/include/eosio/chain/webassembly/wabt.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/wabt.hpp @@ -7,7 +7,6 @@ #include //wabt includes -#include #include #include #include diff --git a/libraries/chain/webassembly/wabt.cpp b/libraries/chain/webassembly/wabt.cpp index 7b303505aa9..bf5e1c9d6c8 100644 --- a/libraries/chain/webassembly/wabt.cpp +++ b/libraries/chain/webassembly/wabt.cpp @@ -5,6 +5,7 @@ //wabt includes #include #include +#include namespace eosio { namespace chain { namespace webassembly { namespace wabt_runtime { @@ -87,10 +88,10 @@ std::unique_ptr wabt_runtime::instantiate_mo } interp::DefinedModule* instantiated_module = nullptr; - ErrorHandlerBuffer error_handler(Location::Type::Binary); + wabt::Errors errors; - wabt::Result res = ReadBinaryInterp(env.get(), code_bytes, code_size, read_binary_options, &error_handler, &instantiated_module); - EOS_ASSERT( Succeeded(res), wasm_execution_error, "Error building wabt interp: ${e}", ("e", error_handler.buffer()) ); + wabt::Result res = ReadBinaryInterp(env.get(), code_bytes, code_size, read_binary_options, &errors, &instantiated_module); + EOS_ASSERT( Succeeded(res), wasm_execution_error, "Error building wabt interp: ${e}", ("e", wabt::FormatErrorsToString(errors, Location::Type::Binary)) ); return std::make_unique(std::move(env), initial_memory, instantiated_module); } diff --git a/libraries/wabt b/libraries/wabt index 
67381cbe17e..f628c1bd0dc 160000 --- a/libraries/wabt +++ b/libraries/wabt @@ -1 +1 @@ -Subproject commit 67381cbe17e0ef87d40f3376e99aea7fff0fa0b1 +Subproject commit f628c1bd0dc82a4db929f994d646e1bec95c86cf From e3526542e074071aa504cdb28bdb02fcbcbdd3e2 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Tue, 18 Sep 2018 15:20:42 -0400 Subject: [PATCH 190/194] log when a producing node drops a previously accepted but unapplied transaction from consideration due to expiry --- plugins/producer_plugin/producer_plugin.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 59ff0992ac4..3f87815acd1 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -1073,6 +1073,10 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool for (auto& trx: unapplied_trxs) { auto category = calculate_transaction_category(trx); if (category == tx_category::EXPIRED || (category == tx_category::UNEXPIRED_UNPERSISTED && _producers.empty())) { + if (!_producers.empty()) { + fc_dlog(_trx_trace_log, "[TRX_TRACE] Node with producers configured is dropping a PREVIOUSLY ACCEPTED transaction : ${txid}", + ("txid", trx->id)); + } chain.drop_unapplied_transaction(trx); } else if (category == tx_category::PERSISTED || (category == tx_category::UNEXPIRED_UNPERSISTED && _pending_block_mode == pending_block_mode::producing)) { apply_trxs.emplace_back(std::move(trx)); From eb54bbad67c1281daf2718305d5439d7c6f1e835 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Tue, 18 Sep 2018 15:32:05 -0400 Subject: [PATCH 191/194] wording --- plugins/producer_plugin/producer_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 3f87815acd1..b9fbd0489d0 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ 
b/plugins/producer_plugin/producer_plugin.cpp @@ -1074,7 +1074,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool auto category = calculate_transaction_category(trx); if (category == tx_category::EXPIRED || (category == tx_category::UNEXPIRED_UNPERSISTED && _producers.empty())) { if (!_producers.empty()) { - fc_dlog(_trx_trace_log, "[TRX_TRACE] Node with producers configured is dropping a PREVIOUSLY ACCEPTED transaction : ${txid}", + fc_dlog(_trx_trace_log, "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED : ${txid}", ("txid", trx->id)); } chain.drop_unapplied_transaction(trx); From a74980b19076f459342cbec70d536669f53d6741 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Tue, 18 Sep 2018 17:03:14 -0400 Subject: [PATCH 192/194] bump version to 1.3.0 --- CMakeLists.txt | 4 ++-- Docker/README.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 22bd2fe9115..376cdf972ea 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -26,8 +26,8 @@ set( CMAKE_CXX_EXTENSIONS ON ) set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) -set(VERSION_MINOR 2) -set(VERSION_PATCH 5) +set(VERSION_MINOR 3) +set(VERSION_PATCH 0) set( CLI_CLIENT_EXECUTABLE_NAME cleos ) set( NODE_EXECUTABLE_NAME nodeos ) diff --git a/Docker/README.md b/Docker/README.md index 82eee59c523..77c3fa4cfbd 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -20,10 +20,10 @@ cd eos/Docker docker build . -t eosio/eos ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.2.5 tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. 
For example, if you wished to generate a docker image based off of the v1.3.0 tag, you could do the following: ```bash -docker build -t eosio/eos:v1.2.5 --build-arg branch=v1.2.5 . +docker build -t eosio/eos:v1.3.0 --build-arg branch=v1.3.0 . ``` By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image. From ed4966eec655d350061ead426715a71cb2007b62 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 18 Sep 2018 17:06:21 -0400 Subject: [PATCH 193/194] (wabt sync) move submodule back to eosio branch --- libraries/wabt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/wabt b/libraries/wabt index f628c1bd0dc..2f5382661f7 160000 --- a/libraries/wabt +++ b/libraries/wabt @@ -1 +1 @@ -Subproject commit f628c1bd0dc82a4db929f994d646e1bec95c86cf +Subproject commit 2f5382661f7bf77cf7a70dcf0543a44fd5025910 From 42068aae4d21a8ef3af5a1acc2d78c0a868d688d Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Tue, 18 Sep 2018 17:55:02 -0400 Subject: [PATCH 194/194] PORT Consolidated Security Fixes for 1.2.6 to 1.3.0 - Consider peers for syncing at most once Co-authored-by: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> --- plugins/net_plugin/net_plugin.cpp | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index d907e7f7390..bb3b33a8f23 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1344,19 +1344,16 @@ namespace eosio { } //scan the list of peers looking for another able to provide sync blocks. - while (cptr != cend) { + auto cstart_it = cptr; + do { //select the first one which is current and break out. - if ((*cptr)->current()) { + if((*cptr)->current()) { source = *cptr; break; } - else { - // advance the iterator in a round robin fashion. 
- if (++cptr == my_impl->connections.end()) { + if(++cptr == my_impl->connections.end()) cptr = my_impl->connections.begin(); - } - } - } + } while(cptr != cstart_it); // no need to check the result, either source advanced or the whole list was checked and the old source is reused. } }